From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 21 Apr 2024 13:54:28 +0200 Subject: Adding upstream version 18.2.2. Signed-off-by: Daniel Baumann --- src/pybind/CMakeLists.txt | 80 + src/pybind/ceph_argparse.py | 1707 ++ src/pybind/ceph_daemon.py | 431 + src/pybind/ceph_mgr_repl.py | 132 + src/pybind/cephfs/CMakeLists.txt | 5 + src/pybind/cephfs/MANIFEST.in | 1 + src/pybind/cephfs/c_cephfs.pxd | 163 + src/pybind/cephfs/cephfs.pyx | 2777 ++ src/pybind/cephfs/mock_cephfs.pxi | 259 + src/pybind/cephfs/setup.py | 215 + src/pybind/cephfs/types.pxd | 55 + src/pybind/mgr/.gitignore | 17 + src/pybind/mgr/.pylintrc | 593 + src/pybind/mgr/CMakeLists.txt | 63 + src/pybind/mgr/alerts/__init__.py | 2 + src/pybind/mgr/alerts/module.py | 258 + src/pybind/mgr/balancer/__init__.py | 2 + src/pybind/mgr/balancer/module.py | 1409 + src/pybind/mgr/ceph_module.pyi | 118 + src/pybind/mgr/cephadm/.gitignore | 2 + src/pybind/mgr/cephadm/HACKING.rst | 272 + src/pybind/mgr/cephadm/Vagrantfile | 66 + src/pybind/mgr/cephadm/__init__.py | 10 + src/pybind/mgr/cephadm/agent.py | 471 + src/pybind/mgr/cephadm/autotune.py | 54 + src/pybind/mgr/cephadm/ceph.repo | 23 + src/pybind/mgr/cephadm/configchecks.py | 705 + src/pybind/mgr/cephadm/exchange.py | 164 + src/pybind/mgr/cephadm/http_server.py | 101 + src/pybind/mgr/cephadm/inventory.py | 1565 + src/pybind/mgr/cephadm/migrations.py | 441 + src/pybind/mgr/cephadm/module.py | 3405 +++ src/pybind/mgr/cephadm/offline_watcher.py | 60 + src/pybind/mgr/cephadm/registry.py | 65 + src/pybind/mgr/cephadm/schedule.py | 481 + src/pybind/mgr/cephadm/serve.py | 1680 + src/pybind/mgr/cephadm/service_discovery.py | 239 + src/pybind/mgr/cephadm/services/__init__.py | 0 src/pybind/mgr/cephadm/services/cephadmservice.py | 1254 + src/pybind/mgr/cephadm/services/container.py | 29 + src/pybind/mgr/cephadm/services/ingress.py | 381 + src/pybind/mgr/cephadm/services/iscsi.py | 212 + src/pybind/mgr/cephadm/services/jaeger.py | 73 + src/pybind/mgr/cephadm/services/monitoring.py | 688 + src/pybind/mgr/cephadm/services/nfs.py | 331 + src/pybind/mgr/cephadm/services/nvmeof.py | 93 + src/pybind/mgr/cephadm/services/osd.py | 972 + src/pybind/mgr/cephadm/ssh.py | 369 + src/pybind/mgr/cephadm/ssl_cert_utils.py | 156 + src/pybind/mgr/cephadm/template.py | 109 + .../cephadm/templates/blink_device_light_cmd.j2 | 1 + .../services/alertmanager/alertmanager.yml.j2 | 51 + .../templates/services/alertmanager/web.yml.j2 | 5 + .../services/grafana/ceph-dashboard.yml.j2 | 39 + .../templates/services/grafana/grafana.ini.j2 | 28 + .../templates/services/ingress/haproxy.cfg.j2 | 90 + .../templates/services/ingress/keepalived.conf.j2 | 36 + .../templates/services/iscsi/iscsi-gateway.cfg.j2 | 13 + .../mgr/cephadm/templates/services/loki.yml.j2 | 28 + .../cephadm/templates/services/nfs/ganesha.conf.j2 | 38 + .../templates/services/node-exporter/web.yml.j2 | 3 + .../templates/services/nvmeof/ceph-nvmeof.conf.j2 | 34 + .../services/prometheus/prometheus.yml.j2 | 109 + .../templates/services/prometheus/web.yml.j2 | 5 + .../mgr/cephadm/templates/services/promtail.yml.j2 | 17 + src/pybind/mgr/cephadm/tests/__init__.py | 0 src/pybind/mgr/cephadm/tests/conftest.py | 27 + src/pybind/mgr/cephadm/tests/fixtures.py | 200 + src/pybind/mgr/cephadm/tests/test_autotune.py | 69 + src/pybind/mgr/cephadm/tests/test_cephadm.py | 2709 ++ src/pybind/mgr/cephadm/tests/test_completion.py | 40 + src/pybind/mgr/cephadm/tests/test_configchecks.py | 668 + 
src/pybind/mgr/cephadm/tests/test_facts.py | 31 + src/pybind/mgr/cephadm/tests/test_migration.py | 340 + src/pybind/mgr/cephadm/tests/test_osd_removal.py | 298 + src/pybind/mgr/cephadm/tests/test_scheduling.py | 1699 ++ .../mgr/cephadm/tests/test_service_discovery.py | 178 + src/pybind/mgr/cephadm/tests/test_services.py | 2725 ++ src/pybind/mgr/cephadm/tests/test_spec.py | 590 + src/pybind/mgr/cephadm/tests/test_ssh.py | 105 + src/pybind/mgr/cephadm/tests/test_template.py | 33 + .../mgr/cephadm/tests/test_tuned_profiles.py | 256 + src/pybind/mgr/cephadm/tests/test_upgrade.py | 481 + src/pybind/mgr/cephadm/tuned_profiles.py | 103 + src/pybind/mgr/cephadm/upgrade.py | 1294 + src/pybind/mgr/cephadm/utils.py | 153 + src/pybind/mgr/cephadm/vagrant.config.example.json | 13 + src/pybind/mgr/cli_api/__init__.py | 10 + src/pybind/mgr/cli_api/module.py | 120 + src/pybind/mgr/cli_api/tests/__init__.py | 0 src/pybind/mgr/cli_api/tests/test_cli_api.py | 40 + src/pybind/mgr/crash/__init__.py | 2 + src/pybind/mgr/crash/module.py | 447 + src/pybind/mgr/dashboard/.coveragerc | 7 + src/pybind/mgr/dashboard/.editorconfig | 29 + src/pybind/mgr/dashboard/.gitignore | 15 + src/pybind/mgr/dashboard/.pylintrc | 541 + src/pybind/mgr/dashboard/CMakeLists.txt | 23 + src/pybind/mgr/dashboard/HACKING.rst | 10 + src/pybind/mgr/dashboard/README.rst | 35 + src/pybind/mgr/dashboard/__init__.py | 60 + src/pybind/mgr/dashboard/api/__init__.py | 0 src/pybind/mgr/dashboard/api/doc.py | 53 + src/pybind/mgr/dashboard/awsauth.py | 169 + src/pybind/mgr/dashboard/cherrypy_backports.py | 199 + .../mgr/dashboard/ci/cephadm/bootstrap-cluster.sh | 39 + .../mgr/dashboard/ci/cephadm/ceph_cluster.yml | 45 + .../dashboard/ci/cephadm/run-cephadm-e2e-tests.sh | 59 + .../mgr/dashboard/ci/cephadm/start-cluster.sh | 80 + .../mgr/dashboard/ci/check_grafana_dashboards.py | 179 + src/pybind/mgr/dashboard/constraints.txt | 7 + src/pybind/mgr/dashboard/controllers/__init__.py | 40 + .../mgr/dashboard/controllers/_api_router.py | 13 + src/pybind/mgr/dashboard/controllers/_auth.py | 18 + .../mgr/dashboard/controllers/_base_controller.py | 315 + src/pybind/mgr/dashboard/controllers/_crud.py | 485 + src/pybind/mgr/dashboard/controllers/_docs.py | 128 + src/pybind/mgr/dashboard/controllers/_endpoint.py | 82 + src/pybind/mgr/dashboard/controllers/_helpers.py | 127 + src/pybind/mgr/dashboard/controllers/_paginate.py | 0 .../mgr/dashboard/controllers/_permissions.py | 60 + .../mgr/dashboard/controllers/_rest_controller.py | 249 + src/pybind/mgr/dashboard/controllers/_router.py | 69 + src/pybind/mgr/dashboard/controllers/_task.py | 84 + src/pybind/mgr/dashboard/controllers/_ui_router.py | 13 + src/pybind/mgr/dashboard/controllers/_version.py | 75 + src/pybind/mgr/dashboard/controllers/auth.py | 122 + src/pybind/mgr/dashboard/controllers/ceph_users.py | 216 + src/pybind/mgr/dashboard/controllers/cephfs.py | 765 + src/pybind/mgr/dashboard/controllers/cluster.py | 101 + .../dashboard/controllers/cluster_configuration.py | 132 + src/pybind/mgr/dashboard/controllers/crush_rule.py | 68 + src/pybind/mgr/dashboard/controllers/daemon.py | 49 + src/pybind/mgr/dashboard/controllers/docs.py | 435 + .../dashboard/controllers/erasure_code_profile.py | 65 + src/pybind/mgr/dashboard/controllers/feedback.py | 120 + .../mgr/dashboard/controllers/frontend_logging.py | 13 + src/pybind/mgr/dashboard/controllers/grafana.py | 49 + src/pybind/mgr/dashboard/controllers/health.py | 302 + src/pybind/mgr/dashboard/controllers/home.py | 148 + 
src/pybind/mgr/dashboard/controllers/host.py | 514 + src/pybind/mgr/dashboard/controllers/iscsi.py | 1140 + src/pybind/mgr/dashboard/controllers/logs.py | 72 + .../mgr/dashboard/controllers/mgr_modules.py | 196 + src/pybind/mgr/dashboard/controllers/monitor.py | 133 + src/pybind/mgr/dashboard/controllers/nfs.py | 279 + .../mgr/dashboard/controllers/orchestrator.py | 52 + src/pybind/mgr/dashboard/controllers/osd.py | 658 + .../mgr/dashboard/controllers/perf_counters.py | 82 + src/pybind/mgr/dashboard/controllers/pool.py | 353 + src/pybind/mgr/dashboard/controllers/prometheus.py | 173 + src/pybind/mgr/dashboard/controllers/rbd.py | 435 + .../mgr/dashboard/controllers/rbd_mirroring.py | 687 + src/pybind/mgr/dashboard/controllers/rgw.py | 970 + src/pybind/mgr/dashboard/controllers/role.py | 143 + src/pybind/mgr/dashboard/controllers/saml2.py | 113 + src/pybind/mgr/dashboard/controllers/service.py | 95 + src/pybind/mgr/dashboard/controllers/settings.py | 113 + src/pybind/mgr/dashboard/controllers/summary.py | 123 + src/pybind/mgr/dashboard/controllers/task.py | 46 + src/pybind/mgr/dashboard/controllers/telemetry.py | 239 + src/pybind/mgr/dashboard/controllers/user.py | 214 + src/pybind/mgr/dashboard/exceptions.py | 123 + src/pybind/mgr/dashboard/frontend/.browserslistrc | 11 + src/pybind/mgr/dashboard/frontend/.editorconfig | 13 + src/pybind/mgr/dashboard/frontend/.eslintrc.json | 87 + src/pybind/mgr/dashboard/frontend/.gherkin-lintrc | 33 + src/pybind/mgr/dashboard/frontend/.gitignore | 50 + src/pybind/mgr/dashboard/frontend/.htmllintrc | 70 + src/pybind/mgr/dashboard/frontend/.npmrc | 3 + src/pybind/mgr/dashboard/frontend/.prettierignore | 1 + src/pybind/mgr/dashboard/frontend/.prettierrc | 6 + src/pybind/mgr/dashboard/frontend/.stylelintrc | 43 + src/pybind/mgr/dashboard/frontend/CMakeLists.txt | 145 + src/pybind/mgr/dashboard/frontend/angular.json | 292 + .../mgr/dashboard/frontend/applitools.config.js | 20 + src/pybind/mgr/dashboard/frontend/babel.config.js | 11 + src/pybind/mgr/dashboard/frontend/cd.js | 166 + .../mgr/dashboard/frontend/cypress.config.ts | 55 + .../cypress/e2e/a11y/dashboard.e2e-spec.ts | 26 + .../cypress/e2e/a11y/navigation.e2e-spec.ts | 20 + .../frontend/cypress/e2e/block/images.e2e-spec.ts | 92 + .../frontend/cypress/e2e/block/images.po.ts | 110 + .../frontend/cypress/e2e/block/iscsi.e2e-spec.ts | 24 + .../frontend/cypress/e2e/block/iscsi.po.ts | 7 + .../cypress/e2e/block/mirroring.e2e-spec.ts | 117 + .../frontend/cypress/e2e/block/mirroring.po.ts | 61 + .../cypress/e2e/cluster/configuration.e2e-spec.ts | 77 + .../cypress/e2e/cluster/configuration.po.ts | 75 + .../cypress/e2e/cluster/create-cluster.po.ts | 56 + .../cypress/e2e/cluster/crush-map.e2e-spec.ts | 36 + .../frontend/cypress/e2e/cluster/crush-map.po.ts | 13 + .../frontend/cypress/e2e/cluster/hosts.e2e-spec.ts | 34 + .../frontend/cypress/e2e/cluster/hosts.po.ts | 186 + .../frontend/cypress/e2e/cluster/inventory.po.ts | 22 + .../frontend/cypress/e2e/cluster/logs.e2e-spec.ts | 61 + .../frontend/cypress/e2e/cluster/logs.po.ts | 77 + .../cypress/e2e/cluster/mgr-modules.e2e-spec.ts | 77 + .../frontend/cypress/e2e/cluster/mgr-modules.po.ts | 57 + .../cypress/e2e/cluster/monitors.e2e-spec.ts | 61 + .../frontend/cypress/e2e/cluster/monitors.po.ts | 7 + .../frontend/cypress/e2e/cluster/osds.e2e-spec.ts | 56 + .../frontend/cypress/e2e/cluster/osds.po.ts | 84 + .../frontend/cypress/e2e/cluster/services.po.ts | 200 + .../frontend/cypress/e2e/cluster/users.e2e-spec.ts | 46 + .../frontend/cypress/e2e/cluster/users.po.ts | 59 
+ .../create-cluster/create-cluster.feature.po.ts | 12 + .../cypress/e2e/common/forms-helper.feature.po.ts | 77 + .../cypress/e2e/common/global.feature.po.ts | 40 + .../cypress/e2e/common/grafana.feature.po.ts | 87 + .../cypress/e2e/common/table-helper.feature.po.ts | 135 + .../frontend/cypress/e2e/common/urls.po.ts | 48 + .../e2e/filesystems/filesystems.e2e-spec.feature | 30 + .../filesystems/subvolume-groups.e2e-spec.feature | 51 + .../e2e/filesystems/subvolumes.e2e-spec.feature | 51 + .../cypress/e2e/orchestrator/01-hosts.e2e-spec.ts | 61 + .../e2e/orchestrator/03-inventory.e2e-spec.ts | 25 + .../cypress/e2e/orchestrator/04-osds.e2e-spec.ts | 49 + .../e2e/orchestrator/05-services.e2e-spec.ts | 35 + .../e2e/orchestrator/grafana/grafana.feature | 60 + .../workflow/01-create-cluster-welcome.feature | 26 + .../workflow/02-create-cluster-add-host.feature | 76 + .../03-create-cluster-create-services.e2e-spec.ts | 46 + .../04-create-cluster-create-osds.e2e-spec.ts | 40 + .../workflow/05-create-cluster-review.e2e-spec.ts | 66 + .../workflow/06-cluster-check.e2e-spec.ts | 82 + .../e2e/orchestrator/workflow/07-osds.e2e-spec.ts | 23 + .../e2e/orchestrator/workflow/08-hosts.e2e-spec.ts | 48 + .../orchestrator/workflow/09-services.e2e-spec.ts | 132 + .../workflow/10-nfs-exports.e2e-spec.ts | 82 + .../e2e/orchestrator/workflow/nfs/nfs-export.po.ts | 52 + .../frontend/cypress/e2e/page-helper.po.ts | 309 + .../frontend/cypress/e2e/pools/pools.e2e-spec.ts | 53 + .../frontend/cypress/e2e/pools/pools.po.ts | 70 + .../frontend/cypress/e2e/rgw/buckets.e2e-spec.ts | 66 + .../frontend/cypress/e2e/rgw/buckets.po.ts | 213 + .../frontend/cypress/e2e/rgw/daemons.e2e-spec.ts | 34 + .../frontend/cypress/e2e/rgw/daemons.po.ts | 34 + .../frontend/cypress/e2e/rgw/roles.e2e-spec.ts | 19 + .../dashboard/frontend/cypress/e2e/rgw/roles.po.ts | 37 + .../frontend/cypress/e2e/rgw/users.e2e-spec.ts | 45 + .../dashboard/frontend/cypress/e2e/rgw/users.po.ts | 139 + .../frontend/cypress/e2e/ui/api-docs.e2e-spec.ts | 14 + .../frontend/cypress/e2e/ui/api-docs.po.ts | 5 + .../cypress/e2e/ui/dashboard-v3.e2e-spec.ts | 49 + .../frontend/cypress/e2e/ui/dashboard-v3.po.ts | 20 + .../frontend/cypress/e2e/ui/dashboard.e2e-spec.ts | 141 + .../frontend/cypress/e2e/ui/dashboard.po.ts | 31 + .../frontend/cypress/e2e/ui/language.e2e-spec.ts | 19 + .../frontend/cypress/e2e/ui/language.po.ts | 15 + .../frontend/cypress/e2e/ui/login.e2e-spec.ts | 23 + .../dashboard/frontend/cypress/e2e/ui/login.po.ts | 22 + .../frontend/cypress/e2e/ui/navigation.e2e-spec.ts | 23 + .../frontend/cypress/e2e/ui/navigation.po.ts | 78 + .../cypress/e2e/ui/notification.e2e-spec.ts | 56 + .../frontend/cypress/e2e/ui/notification.po.ts | 45 + .../frontend/cypress/e2e/ui/role-mgmt.e2e-spec.ts | 36 + .../frontend/cypress/e2e/ui/role-mgmt.po.ts | 40 + .../frontend/cypress/e2e/ui/user-mgmt.e2e-spec.ts | 36 + .../frontend/cypress/e2e/ui/user-mgmt.po.ts | 39 + .../cypress/e2e/visualTests/dashboard.vrt-spec.ts | 24 + .../cypress/e2e/visualTests/login.vrt-spec.ts | 19 + .../cypress/fixtures/block-rbd-status.json | 1 + .../cypress/fixtures/nfs-ganesha-status.json | 4 + .../cypress/fixtures/orchestrator/inventory.json | 390 + .../cypress/fixtures/orchestrator/services.json | 523 + .../frontend/cypress/fixtures/rgw-status.json | 1 + .../dashboard/frontend/cypress/plugins/index.js | 26 + .../dashboard/frontend/cypress/support/commands.ts | 130 + .../mgr/dashboard/frontend/cypress/support/e2e.ts | 19 + .../frontend/cypress/support/eyes-index.d.ts | 1 + 
.../mgr/dashboard/frontend/cypress/tsconfig.json | 17 + .../frontend/dist/en-US/119.066087561586659c.js | 1 + .../frontend/dist/en-US/25.9d84971ea743706b.js | 1 + .../frontend/dist/en-US/3rdpartylicenses.txt | 3545 +++ .../frontend/dist/en-US/803.08339784f3bb5d16.js | 1 + .../dist/en-US/Ceph_Logo.beb815b55d2e7363.svg | 71 + .../assets/Ceph_Ceph_Logo_with_text_red_white.svg | 69 + .../assets/Ceph_Ceph_Logo_with_text_white.svg | 69 + .../frontend/dist/en-US/assets/Ceph_Logo.svg | 71 + .../frontend/dist/en-US/assets/ceph_background.gif | Bin 0 -> 98115 bytes .../frontend/dist/en-US/assets/loading.gif | Bin 0 -> 35386 bytes .../frontend/dist/en-US/assets/logo-mini.png | Bin 0 -> 1811 bytes .../frontend/dist/en-US/assets/prometheus_logo.svg | 50 + .../en-US/ceph_background.3fbdf95cd52530d7.gif | Bin 0 -> 98115 bytes .../mgr/dashboard/frontend/dist/en-US/favicon.ico | Bin 0 -> 1150 bytes .../forkawesome-webfont.23671bdbd055fa7b.woff | Bin 0 -> 115148 bytes .../en-US/forkawesome-webfont.3217b1b06e001045.svg | 2849 ++ .../en-US/forkawesome-webfont.3b3951dce6cf5d60.ttf | Bin 0 -> 188756 bytes .../en-US/forkawesome-webfont.c0fee260bb6fd5fd.eot | Bin 0 -> 188946 bytes .../forkawesome-webfont.d0a4ad9e6369d510.woff2 | Bin 0 -> 91624 bytes .../mgr/dashboard/frontend/dist/en-US/index.html | 23 + .../frontend/dist/en-US/main.a87f559bb03ca0fb.js | 3 + .../dist/en-US/polyfills.374f1f989f34e1be.js | 1 + .../en-US/prometheus_logo.8057911d27be9bb1.svg | 50 + .../dist/en-US/runtime.a53144ca583f6e2c.js | 1 + .../dist/en-US/scripts.177a7ad3f45b4499.js | 1 + .../dist/en-US/styles.5f6140b407c420b8.css | 17 + .../mgr/dashboard/frontend/html-linter.config.json | 12 + src/pybind/mgr/dashboard/frontend/i18n.config.json | 12 + src/pybind/mgr/dashboard/frontend/jest.config.cjs | 39 + src/pybind/mgr/dashboard/frontend/ngcc.config.js | 10 + .../mgr/dashboard/frontend/package-lock.json | 30504 +++++++++++++++++++ src/pybind/mgr/dashboard/frontend/package.json | 137 + .../mgr/dashboard/frontend/proxy.conf.json.sample | 17 + .../frontend/src/app/app-routing.module.ts | 466 + .../dashboard/frontend/src/app/app.component.html | 1 + .../dashboard/frontend/src/app/app.component.scss | 0 .../frontend/src/app/app.component.spec.ts | 25 + .../dashboard/frontend/src/app/app.component.ts | 18 + .../mgr/dashboard/frontend/src/app/app.module.ts | 51 + .../frontend/src/app/ceph/block/block.module.ts | 205 + .../iscsi-setting/iscsi-setting.component.html | 57 + .../iscsi-setting/iscsi-setting.component.scss | 0 .../iscsi-setting/iscsi-setting.component.spec.ts | 37 + .../block/iscsi-setting/iscsi-setting.component.ts | 31 + .../block/iscsi-tabs/iscsi-tabs.component.html | 16 + .../block/iscsi-tabs/iscsi-tabs.component.scss | 0 .../block/iscsi-tabs/iscsi-tabs.component.spec.ts | 28 + .../ceph/block/iscsi-tabs/iscsi-tabs.component.ts | 8 + .../iscsi-target-details.component.html | 41 + .../iscsi-target-details.component.scss | 0 .../iscsi-target-details.component.spec.ts | 207 + .../iscsi-target-details.component.ts | 346 + .../iscsi-target-discovery-modal.component.html | 128 + .../iscsi-target-discovery-modal.component.scss | 0 .../iscsi-target-discovery-modal.component.spec.ts | 133 + .../iscsi-target-discovery-modal.component.ts | 123 + .../iscsi-target-form.component.html | 670 + .../iscsi-target-form.component.scss | 3 + .../iscsi-target-form.component.spec.ts | 593 + .../iscsi-target-form.component.ts | 822 + ...scsi-target-image-settings-modal.component.html | 92 + ...scsi-target-image-settings-modal.component.scss | 0 
...i-target-image-settings-modal.component.spec.ts | 98 + .../iscsi-target-image-settings-modal.component.ts | 87 + .../iscsi-target-iqn-settings-modal.component.html | 32 + .../iscsi-target-iqn-settings-modal.component.scss | 0 ...csi-target-iqn-settings-modal.component.spec.ts | 71 + .../iscsi-target-iqn-settings-modal.component.ts | 60 + .../iscsi-target-list.component.html | 53 + .../iscsi-target-list.component.scss | 0 .../iscsi-target-list.component.spec.ts | 309 + .../iscsi-target-list.component.ts | 242 + .../src/app/ceph/block/iscsi/iscsi.component.html | 53 + .../src/app/ceph/block/iscsi/iscsi.component.scss | 0 .../app/ceph/block/iscsi/iscsi.component.spec.ts | 83 + .../src/app/ceph/block/iscsi/iscsi.component.ts | 117 + .../bootstrap-create-modal.component.html | 87 + .../bootstrap-create-modal.component.scss | 3 + .../bootstrap-create-modal.component.spec.ts | 113 + .../bootstrap-create-modal.component.ts | 153 + .../bootstrap-import-modal.component.html | 96 + .../bootstrap-import-modal.component.scss | 0 .../bootstrap-import-modal.component.spec.ts | 131 + .../bootstrap-import-modal.component.ts | 187 + .../daemon-list/daemon-list.component.html | 13 + .../daemon-list/daemon-list.component.scss | 0 .../daemon-list/daemon-list.component.spec.ts | 28 + .../mirroring/daemon-list/daemon-list.component.ts | 62 + .../mirroring/image-list/image-list.component.html | 76 + .../mirroring/image-list/image-list.component.scss | 0 .../image-list/image-list.component.spec.ts | 36 + .../mirroring/image-list/image-list.component.ts | 106 + .../mirroring/mirror-health-color.pipe.spec.ts | 25 + .../block/mirroring/mirror-health-color.pipe.ts | 17 + .../app/ceph/block/mirroring/mirroring.module.ts | 43 + .../mirroring/overview/overview.component.html | 68 + .../mirroring/overview/overview.component.scss | 0 .../mirroring/overview/overview.component.spec.ts | 79 + .../block/mirroring/overview/overview.component.ts | 121 + .../pool-edit-mode-modal.component.html | 44 + .../pool-edit-mode-modal.component.scss | 0 .../pool-edit-mode-modal.component.spec.ts | 86 + .../pool-edit-mode-modal.component.ts | 111 + .../pool-edit-mode-response.model.ts | 3 + .../pool-edit-peer-modal.component.html | 100 + .../pool-edit-peer-modal.component.scss | 0 .../pool-edit-peer-modal.component.spec.ts | 148 + .../pool-edit-peer-modal.component.ts | 141 + .../pool-edit-peer-response.model.ts | 7 + .../mirroring/pool-list/pool-list.component.html | 33 + .../mirroring/pool-list/pool-list.component.scss | 0 .../pool-list/pool-list.component.spec.ts | 37 + .../mirroring/pool-list/pool-list.component.ts | 188 + .../rbd-configuration-form.component.html | 72 + .../rbd-configuration-form.component.scss | 4 + .../rbd-configuration-form.component.spec.ts | 294 + .../rbd-configuration-form.component.ts | 166 + .../rbd-configuration-list.component.html | 29 + .../rbd-configuration-list.component.scss | 0 .../rbd-configuration-list.component.spec.ts | 99 + .../rbd-configuration-list.component.ts | 65 + .../block/rbd-details/rbd-details.component.html | 184 + .../block/rbd-details/rbd-details.component.scss | 0 .../rbd-details/rbd-details.component.spec.ts | 30 + .../block/rbd-details/rbd-details.component.ts | 31 + .../ceph/block/rbd-form/rbd-feature.interface.ts | 10 + .../block/rbd-form/rbd-form-clone-request.model.ts | 13 + .../block/rbd-form/rbd-form-copy-request.model.ts | 14 + .../rbd-form/rbd-form-create-request.model.ts | 5 + .../block/rbd-form/rbd-form-edit-request.model.ts | 15 + 
.../app/ceph/block/rbd-form/rbd-form-mode.enum.ts | 5 + .../ceph/block/rbd-form/rbd-form-response.model.ts | 7 + .../ceph/block/rbd-form/rbd-form.component.html | 398 + .../ceph/block/rbd-form/rbd-form.component.scss | 0 .../ceph/block/rbd-form/rbd-form.component.spec.ts | 484 + .../app/ceph/block/rbd-form/rbd-form.component.ts | 831 + .../src/app/ceph/block/rbd-form/rbd-form.model.ts | 26 + .../app/ceph/block/rbd-form/rbd-parent.model.ts | 6 + .../ceph/block/rbd-list/rbd-list.component.html | 141 + .../ceph/block/rbd-list/rbd-list.component.scss | 5 + .../ceph/block/rbd-list/rbd-list.component.spec.ts | 384 + .../app/ceph/block/rbd-list/rbd-list.component.ts | 664 + .../src/app/ceph/block/rbd-list/rbd-model.ts | 15 + .../rbd-namespace-form-modal.component.html | 79 + .../rbd-namespace-form-modal.component.scss | 0 .../rbd-namespace-form-modal.component.spec.ts | 39 + .../rbd-namespace-form-modal.component.ts | 144 + .../rbd-namespace-list.component.html | 18 + .../rbd-namespace-list.component.scss | 0 .../rbd-namespace-list.component.spec.ts | 41 + .../rbd-namespace-list.component.ts | 157 + .../rbd-performance/rbd-performance.component.html | 9 + .../rbd-performance/rbd-performance.component.scss | 0 .../rbd-performance.component.spec.ts | 30 + .../rbd-performance/rbd-performance.component.ts | 8 + .../rbd-snapshot-form-modal.component.html | 61 + .../rbd-snapshot-form-modal.component.scss | 0 .../rbd-snapshot-form-modal.component.spec.ts | 86 + .../rbd-snapshot-form-modal.component.ts | 154 + .../rbd-snapshot-actions.model.ts | 138 + .../rbd-snapshot-list.component.html | 17 + .../rbd-snapshot-list.component.scss | 0 .../rbd-snapshot-list.component.spec.ts | 323 + .../rbd-snapshot-list.component.ts | 338 + .../block/rbd-snapshot-list/rbd-snapshot.model.ts | 9 + .../ceph/block/rbd-tabs/rbd-tabs.component.html | 35 + .../ceph/block/rbd-tabs/rbd-tabs.component.scss | 0 .../ceph/block/rbd-tabs/rbd-tabs.component.spec.ts | 27 + .../app/ceph/block/rbd-tabs/rbd-tabs.component.ts | 18 + .../rbd-trash-list/rbd-trash-list.component.html | 52 + .../rbd-trash-list/rbd-trash-list.component.scss | 0 .../rbd-trash-list.component.spec.ts | 172 + .../rbd-trash-list/rbd-trash-list.component.ts | 225 + .../rbd-trash-move-modal.component.html | 57 + .../rbd-trash-move-modal.component.scss | 0 .../rbd-trash-move-modal.component.spec.ts | 94 + .../rbd-trash-move-modal.component.ts | 94 + .../rbd-trash-purge-modal.component.html | 46 + .../rbd-trash-purge-modal.component.scss | 0 .../rbd-trash-purge-modal.component.spec.ts | 105 + .../rbd-trash-purge-modal.component.ts | 74 + .../rbd-trash-restore-modal.component.html | 41 + .../rbd-trash-restore-modal.component.scss | 0 .../rbd-trash-restore-modal.component.spec.ts | 81 + .../rbd-trash-restore-modal.component.ts | 65 + .../dashboard/frontend/src/app/ceph/ceph.module.ts | 23 + .../cephfs-chart/cephfs-chart.component.html | 12 + .../cephfs-chart/cephfs-chart.component.scss | 8 + .../cephfs-chart/cephfs-chart.component.spec.ts | 81 + .../cephfs/cephfs-chart/cephfs-chart.component.ts | 196 + .../cephfs-clients/cephfs-clients.component.html | 13 + .../cephfs-clients/cephfs-clients.component.scss | 0 .../cephfs-clients.component.spec.ts | 83 + .../cephfs-clients/cephfs-clients.component.ts | 102 + .../cephfs-detail/cephfs-detail.component.html | 43 + .../cephfs-detail/cephfs-detail.component.scss | 3 + .../cephfs-detail/cephfs-detail.component.spec.ts | 55 + .../cephfs-detail/cephfs-detail.component.ts | 91 + .../cephfs-directories.component.html | 75 + 
.../cephfs-directories.component.scss | 17 + .../cephfs-directories.component.spec.ts | 1111 + .../cephfs-directories.component.ts | 738 + .../cephfs/cephfs-form/cephfs-form.component.html | 105 + .../cephfs/cephfs-form/cephfs-form.component.scss | 0 .../cephfs-form/cephfs-form.component.spec.ts | 82 + .../cephfs/cephfs-form/cephfs-form.component.ts | 197 + .../cephfs/cephfs-list/cephfs-list.component.html | 22 + .../cephfs/cephfs-list/cephfs-list.component.scss | 0 .../cephfs-list/cephfs-list.component.spec.ts | 97 + .../cephfs/cephfs-list/cephfs-list.component.ts | 153 + .../cephfs-subvolume-form.component.html | 186 + .../cephfs-subvolume-form.component.scss | 0 .../cephfs-subvolume-form.component.spec.ts | 77 + .../cephfs-subvolume-form.component.ts | 216 + .../cephfs-subvolume-group.component.html | 54 + .../cephfs-subvolume-group.component.scss | 0 .../cephfs-subvolume-group.component.spec.ts | 28 + .../cephfs-subvolume-group.component.ts | 178 + .../cephfs-subvolume-list.component.html | 123 + .../cephfs-subvolume-list.component.scss | 0 .../cephfs-subvolume-list.component.spec.ts | 30 + .../cephfs-subvolume-list.component.ts | 241 + .../cephfs-subvolumegroup-form.component.html | 148 + .../cephfs-subvolumegroup-form.component.scss | 0 .../cephfs-subvolumegroup-form.component.spec.ts | 38 + .../cephfs-subvolumegroup-form.component.ts | 198 + .../cephfs/cephfs-tabs/cephfs-tabs.component.html | 67 + .../cephfs/cephfs-tabs/cephfs-tabs.component.scss | 0 .../cephfs-tabs/cephfs-tabs.component.spec.ts | 215 + .../cephfs/cephfs-tabs/cephfs-tabs.component.ts | 130 + .../frontend/src/app/ceph/cephfs/cephfs.module.ts | 51 + .../src/app/ceph/cluster/cluster.module.ts | 131 + .../configuration-details.component.html | 105 + .../configuration-details.component.scss | 0 .../configuration-details.component.spec.ts | 26 + .../configuration-details.component.ts | 29 + .../configuration-form-create-request.model.ts | 4 + .../configuration-form.component.html | 160 + .../configuration-form.component.scss | 12 + .../configuration-form.component.spec.ts | 100 + .../configuration-form.component.ts | 172 + .../configuration/configuration.component.html | 26 + .../configuration/configuration.component.scss | 16 + .../configuration/configuration.component.spec.ts | 46 + .../configuration/configuration.component.ts | 149 + .../create-cluster-review.component.html | 52 + .../create-cluster-review.component.scss | 5 + .../create-cluster-review.component.spec.ts | 29 + .../create-cluster-review.component.ts | 74 + .../create-cluster/create-cluster.component.html | 103 + .../create-cluster/create-cluster.component.scss | 22 + .../create-cluster.component.spec.ts | 178 + .../create-cluster/create-cluster.component.ts | 248 + .../ceph/cluster/crushmap/crushmap.component.html | 41 + .../ceph/cluster/crushmap/crushmap.component.scss | 3 + .../cluster/crushmap/crushmap.component.spec.ts | 137 + .../ceph/cluster/crushmap/crushmap.component.ts | 122 + .../cluster/hosts/fixtures/host_list_response.json | 32 + .../hosts/host-details/host-details.component.html | 62 + .../hosts/host-details/host-details.component.scss | 0 .../host-details/host-details.component.spec.ts | 68 + .../hosts/host-details/host-details.component.ts | 20 + .../hosts/host-form/host-form.component.html | 108 + .../hosts/host-form/host-form.component.scss | 0 .../hosts/host-form/host-form.component.spec.ts | 168 + .../cluster/hosts/host-form/host-form.component.ts | 174 + .../app/ceph/cluster/hosts/hosts.component.html | 99 + 
.../app/ceph/cluster/hosts/hosts.component.scss | 0 .../app/ceph/cluster/hosts/hosts.component.spec.ts | 459 + .../src/app/ceph/cluster/hosts/hosts.component.ts | 530 + .../fixtures/inventory_list_response.json | 324 + .../inventory-devices/inventory-device.model.ts | 20 + .../inventory-devices.component.html | 16 + .../inventory-devices.component.scss | 12 + .../inventory-devices.component.spec.ts | 194 + .../inventory-devices.component.ts | 266 + .../ceph/cluster/inventory/inventory-host.model.ts | 6 + .../cluster/inventory/inventory.component.html | 14 + .../cluster/inventory/inventory.component.scss | 0 .../cluster/inventory/inventory.component.spec.ts | 67 + .../ceph/cluster/inventory/inventory.component.ts | 90 + .../src/app/ceph/cluster/logs/logs.component.html | 194 + .../src/app/ceph/cluster/logs/logs.component.scss | 58 + .../app/ceph/cluster/logs/logs.component.spec.ts | 169 + .../src/app/ceph/cluster/logs/logs.component.ts | 194 + .../mgr-module-details.component.html | 4 + .../mgr-module-details.component.scss | 0 .../mgr-module-details.component.spec.ts | 27 + .../mgr-module-details.component.ts | 25 + .../mgr-module-form/mgr-module-form.component.html | 110 + .../mgr-module-form/mgr-module-form.component.scss | 0 .../mgr-module-form.component.spec.ts | 80 + .../mgr-module-form/mgr-module-form.component.ts | 135 + .../mgr-module-list/mgr-module-list.component.html | 20 + .../mgr-module-list/mgr-module-list.component.scss | 0 .../mgr-module-list.component.spec.ts | 155 + .../mgr-module-list/mgr-module-list.component.ts | 198 + .../ceph/cluster/mgr-modules/mgr-modules.module.ts | 17 + .../ceph/cluster/monitor/monitor.component.html | 65 + .../ceph/cluster/monitor/monitor.component.scss | 0 .../ceph/cluster/monitor/monitor.component.spec.ts | 105 + .../app/ceph/cluster/monitor/monitor.component.ts | 74 + .../osd-creation-preview-modal.component.html | 20 + .../osd-creation-preview-modal.component.scss | 0 .../osd-creation-preview-modal.component.spec.ts | 38 + .../osd-creation-preview-modal.component.ts | 62 + .../osd/osd-details/osd-details.component.html | 72 + .../osd/osd-details/osd-details.component.scss | 0 .../osd/osd-details/osd-details.component.spec.ts | 31 + .../osd/osd-details/osd-details.component.ts | 44 + .../devices-selection-change-event.interface.ts | 5 + .../devices-selection-clear-event.interface.ts | 6 + .../osd-devices-selection-groups.component.html | 51 + .../osd-devices-selection-groups.component.scss | 3 + .../osd-devices-selection-groups.component.spec.ts | 125 + .../osd-devices-selection-groups.component.ts | 140 + .../osd-devices-selection-modal.component.html | 43 + .../osd-devices-selection-modal.component.scss | 0 .../osd-devices-selection-modal.component.spec.ts | 109 + .../osd-devices-selection-modal.component.ts | 102 + .../osd-flags-indiv-modal.component.html | 48 + .../osd-flags-indiv-modal.component.scss | 0 .../osd-flags-indiv-modal.component.spec.ts | 353 + .../osd-flags-indiv-modal.component.ts | 134 + .../osd-flags-modal/osd-flags-modal.component.html | 41 + .../osd-flags-modal/osd-flags-modal.component.scss | 0 .../osd-flags-modal.component.spec.ts | 99 + .../osd-flags-modal/osd-flags-modal.component.ts | 156 + .../ceph/cluster/osd/osd-form/drive-group.model.ts | 97 + .../cluster/osd/osd-form/osd-feature.interface.ts | 4 + .../cluster/osd/osd-form/osd-form.component.html | 218 + .../cluster/osd/osd-form/osd-form.component.scss | 0 .../osd/osd-form/osd-form.component.spec.ts | 309 + .../cluster/osd/osd-form/osd-form.component.ts | 286 + 
.../osd/osd-list/fixtures/osd_list_response.json | 605 + .../cluster/osd/osd-list/osd-list.component.html | 154 + .../cluster/osd/osd-list/osd-list.component.scss | 0 .../osd/osd-list/osd-list.component.spec.ts | 641 + .../cluster/osd/osd-list/osd-list.component.ts | 624 + .../osd-pg-scrub-modal.component.html | 45 + .../osd-pg-scrub-modal.component.scss | 0 .../osd-pg-scrub-modal.component.spec.ts | 64 + .../osd-pg-scrub-modal.component.ts | 68 + .../osd-pg-scrub-modal.options.ts | 38 + .../osd-recv-speed-modal.component.html | 92 + .../osd-recv-speed-modal.component.scss | 0 .../osd-recv-speed-modal.component.spec.ts | 317 + .../osd-recv-speed-modal.component.ts | 238 + .../osd-reweight-modal.component.html | 38 + .../osd-reweight-modal.component.scss | 0 .../osd-reweight-modal.component.spec.ts | 56 + .../osd-reweight-modal.component.ts | 43 + .../osd-scrub-modal/osd-scrub-modal.component.html | 22 + .../osd-scrub-modal/osd-scrub-modal.component.scss | 0 .../osd-scrub-modal.component.spec.ts | 50 + .../osd-scrub-modal/osd-scrub-modal.component.ts | 52 + .../active-alert-list.component.html | 41 + .../active-alert-list.component.scss | 0 .../active-alert-list.component.spec.ts | 103 + .../active-alert-list.component.ts | 113 + .../prometheus-tabs/prometheus-tabs.component.html | 30 + .../prometheus-tabs/prometheus-tabs.component.scss | 0 .../prometheus-tabs.component.spec.ts | 29 + .../prometheus-tabs/prometheus-tabs.component.ts | 12 + .../rules-list/rules-list.component.html | 22 + .../rules-list/rules-list.component.scss | 0 .../rules-list/rules-list.component.spec.ts | 40 + .../prometheus/rules-list/rules-list.component.ts | 69 + .../silence-form/silence-form.component.html | 211 + .../silence-form/silence-form.component.scss | 3 + .../silence-form/silence-form.component.spec.ts | 593 + .../silence-form/silence-form.component.ts | 349 + .../silence-list/silence-list.component.html | 34 + .../silence-list/silence-list.component.scss | 0 .../silence-list/silence-list.component.spec.ts | 149 + .../silence-list/silence-list.component.ts | 225 + .../silence-matcher-modal.component.html | 85 + .../silence-matcher-modal.component.scss | 0 .../silence-matcher-modal.component.spec.ts | 209 + .../silence-matcher-modal.component.ts | 107 + .../ceph/cluster/services/placement.pipe.spec.ts | 78 + .../app/ceph/cluster/services/placement.pipe.ts | 41 + .../service-daemon-list.component.html | 102 + .../service-daemon-list.component.scss | 14 + .../service-daemon-list.component.spec.ts | 264 + .../service-daemon-list.component.ts | 356 + .../service-details/service-details.component.html | 4 + .../service-details/service-details.component.scss | 0 .../service-details.component.spec.ts | 43 + .../service-details/service-details.component.ts | 17 + .../service-form/service-form.component.html | 824 + .../service-form/service-form.component.scss | 0 .../service-form/service-form.component.spec.ts | 592 + .../service-form/service-form.component.ts | 874 + .../ceph/cluster/services/services.component.html | 39 + .../ceph/cluster/services/services.component.scss | 0 .../cluster/services/services.component.spec.ts | 105 + .../ceph/cluster/services/services.component.ts | 261 + .../cluster/telemetry/telemetry.component.html | 345 + .../cluster/telemetry/telemetry.component.scss | 0 .../cluster/telemetry/telemetry.component.spec.ts | 322 + .../ceph/cluster/telemetry/telemetry.component.ts | 307 + .../upgrade-start-modal.component.html | 89 + .../upgrade-start-modal.component.scss | 0 
.../upgrade-start-modal.component.spec.ts | 32 + .../upgrade-form/upgrade-start-modal.component.ts | 99 + .../upgrade-progress.component.html | 89 + .../upgrade-progress.component.scss | 0 .../upgrade-progress.component.spec.ts | 29 + .../upgrade-progress/upgrade-progress.component.ts | 140 + .../ceph/cluster/upgrade/upgrade.component.html | 233 + .../ceph/cluster/upgrade/upgrade.component.scss | 0 .../ceph/cluster/upgrade/upgrade.component.spec.ts | 230 + .../app/ceph/cluster/upgrade/upgrade.component.ts | 145 + .../dashboard-area-chart.component.html | 39 + .../dashboard-area-chart.component.scss | 19 + .../dashboard-area-chart.component.spec.ts | 36 + .../dashboard-area-chart.component.ts | 307 + .../dashboard-pie/dashboard-pie.component.html | 16 + .../dashboard-pie/dashboard-pie.component.scss | 22 + .../dashboard-pie/dashboard-pie.component.spec.ts | 27 + .../dashboard-pie/dashboard-pie.component.ts | 191 + .../dashboard-time-selector.component.html | 11 + .../dashboard-time-selector.component.scss | 6 + .../dashboard-time-selector.component.spec.ts | 24 + .../dashboard-time-selector.component.ts | 69 + .../app/ceph/dashboard-v3/dashboard-v3.module.ts | 43 + .../dashboard/dashboard-v3.component.html | 309 + .../dashboard/dashboard-v3.component.scss | 33 + .../dashboard/dashboard-v3.component.spec.ts | 328 + .../dashboard/dashboard-v3.component.ts | 164 + .../app/ceph/dashboard-v3/pg-summary.pipe.spec.ts | 36 + .../src/app/ceph/dashboard-v3/pg-summary.pipe.ts | 27 + .../src/app/ceph/dashboard/dashboard.module.ts | 50 + .../dashboard/dashboard/dashboard.component.html | 15 + .../dashboard/dashboard/dashboard.component.scss | 3 + .../dashboard/dashboard.component.spec.ts | 31 + .../dashboard/dashboard/dashboard.component.ts | 16 + .../dashboard/health-pie/health-pie.component.html | 15 + .../dashboard/health-pie/health-pie.component.scss | 22 + .../health-pie/health-pie.component.spec.ts | 75 + .../dashboard/health-pie/health-pie.component.ts | 200 + .../ceph/dashboard/health/health.component.html | 240 + .../ceph/dashboard/health/health.component.scss | 45 + .../ceph/dashboard/health/health.component.spec.ts | 348 + .../app/ceph/dashboard/health/health.component.ts | 280 + .../dashboard/info-card/info-card-popover.scss | 54 + .../dashboard/info-card/info-card.component.html | 18 + .../dashboard/info-card/info-card.component.scss | 44 + .../info-card/info-card.component.spec.ts | 65 + .../dashboard/info-card/info-card.component.ts | 17 + .../dashboard/info-group/info-group.component.html | 17 + .../dashboard/info-group/info-group.component.scss | 14 + .../info-group/info-group.component.spec.ts | 36 + .../dashboard/info-group/info-group.component.ts | 14 + .../app/ceph/dashboard/mds-summary.pipe.spec.ts | 72 + .../src/app/ceph/dashboard/mds-summary.pipe.ts | 78 + .../app/ceph/dashboard/mgr-summary.pipe.spec.ts | 52 + .../src/app/ceph/dashboard/mgr-summary.pipe.ts | 48 + .../app/ceph/dashboard/mon-summary.pipe.spec.ts | 40 + .../src/app/ceph/dashboard/mon-summary.pipe.ts | 17 + .../app/ceph/dashboard/osd-summary.pipe.spec.ts | 193 + .../src/app/ceph/dashboard/osd-summary.pipe.ts | 91 + .../frontend/src/app/ceph/nfs/models/nfs.fsal.ts | 5 + .../nfs/nfs-details/nfs-details.component.html | 32 + .../nfs/nfs-details/nfs-details.component.scss | 0 .../nfs/nfs-details/nfs-details.component.spec.ts | 102 + .../ceph/nfs/nfs-details/nfs-details.component.ts | 68 + .../nfs-form-client/nfs-form-client.component.html | 109 + .../nfs-form-client/nfs-form-client.component.scss | 0 
.../nfs-form-client.component.spec.ts | 71 + .../nfs-form-client/nfs-form-client.component.ts | 97 + .../app/ceph/nfs/nfs-form/nfs-form.component.html | 400 + .../app/ceph/nfs/nfs-form/nfs-form.component.scss | 11 + .../ceph/nfs/nfs-form/nfs-form.component.spec.ts | 238 + .../app/ceph/nfs/nfs-form/nfs-form.component.ts | 537 + .../app/ceph/nfs/nfs-list/nfs-list.component.html | 30 + .../app/ceph/nfs/nfs-list/nfs-list.component.scss | 0 .../ceph/nfs/nfs-list/nfs-list.component.spec.ts | 195 + .../app/ceph/nfs/nfs-list/nfs-list.component.ts | 199 + .../frontend/src/app/ceph/nfs/nfs.module.ts | 26 + .../performance-counter.module.ts | 14 + .../performance-counter.component.html | 4 + .../performance-counter.component.scss | 0 .../performance-counter.component.spec.ts | 29 + .../performance-counter.component.ts | 25 + .../table-performance-counter.component.html | 15 + .../table-performance-counter.component.scss | 0 .../table-performance-counter.component.spec.ts | 62 + .../table-performance-counter.component.ts | 72 + .../crush-rule-form-modal.component.html | 123 + .../crush-rule-form-modal.component.scss | 0 .../crush-rule-form-modal.component.spec.ts | 210 + .../crush-rule-form-modal.component.ts | 108 + .../erasure-code-profile-form-modal.component.html | 418 + .../erasure-code-profile-form-modal.component.scss | 0 ...asure-code-profile-form-modal.component.spec.ts | 688 + .../erasure-code-profile-form-modal.component.ts | 459 + .../pool/pool-details/pool-details.component.html | 54 + .../pool/pool-details/pool-details.component.scss | 0 .../pool-details/pool-details.component.spec.ts | 171 + .../pool/pool-details/pool-details.component.ts | 80 + .../src/app/ceph/pool/pool-form/pool-form-data.ts | 37 + .../ceph/pool/pool-form/pool-form.component.html | 618 + .../ceph/pool/pool-form/pool-form.component.scss | 3 + .../pool/pool-form/pool-form.component.spec.ts | 1434 + .../app/ceph/pool/pool-form/pool-form.component.ts | 916 + .../ceph/pool/pool-list/pool-list.component.html | 61 + .../ceph/pool/pool-list/pool-list.component.scss | 19 + .../pool/pool-list/pool-list.component.spec.ts | 518 + .../app/ceph/pool/pool-list/pool-list.component.ts | 332 + .../frontend/src/app/ceph/pool/pool-stat.ts | 16 + .../frontend/src/app/ceph/pool/pool.module.ts | 57 + .../dashboard/frontend/src/app/ceph/pool/pool.ts | 73 + .../create-rgw-service-entities.component.html | 70 + .../create-rgw-service-entities.component.scss | 0 .../create-rgw-service-entities.component.spec.ts | 37 + .../create-rgw-service-entities.component.ts | 99 + .../app/ceph/rgw/models/rgw-bucket-encryption.ts | 7 + .../app/ceph/rgw/models/rgw-bucket-mfa-delete.ts | 4 + .../app/ceph/rgw/models/rgw-bucket-versioning.ts | 4 + .../frontend/src/app/ceph/rgw/models/rgw-daemon.ts | 11 + ...rgw-multisite-zone-deletion-form.component.html | 54 + ...rgw-multisite-zone-deletion-form.component.scss | 9 + ...-multisite-zone-deletion-form.component.spec.ts | 32 + .../rgw-multisite-zone-deletion-form.component.ts | 99 + ...ultisite-zonegroup-deletion-form.component.html | 75 + ...ultisite-zonegroup-deletion-form.component.scss | 9 + ...isite-zonegroup-deletion-form.component.spec.ts | 32 + ...-multisite-zonegroup-deletion-form.component.ts | 106 + .../src/app/ceph/rgw/models/rgw-multisite.ts | 52 + .../app/ceph/rgw/models/rgw-user-capabilities.ts | 15 + .../src/app/ceph/rgw/models/rgw-user-capability.ts | 4 + .../src/app/ceph/rgw/models/rgw-user-s3-key.ts | 6 + .../src/app/ceph/rgw/models/rgw-user-subuser.ts | 6 + 
.../src/app/ceph/rgw/models/rgw-user-swift-key.ts | 4 + .../rgw-bucket-details.component.html | 94 + .../rgw-bucket-details.component.scss | 7 + .../rgw-bucket-details.component.spec.ts | 43 + .../rgw-bucket-details.component.ts | 24 + .../rgw-bucket-form/rgw-bucket-form.component.html | 397 + .../rgw-bucket-form/rgw-bucket-form.component.scss | 0 .../rgw-bucket-form.component.spec.ts | 300 + .../rgw-bucket-form/rgw-bucket-form.component.ts | 340 + .../rgw-bucket-list/rgw-bucket-list.component.html | 44 + .../rgw-bucket-list/rgw-bucket-list.component.scss | 0 .../rgw-bucket-list.component.spec.ts | 178 + .../rgw-bucket-list/rgw-bucket-list.component.ts | 188 + .../rgw-config-modal.component.html | 235 + .../rgw-config-modal.component.scss | 0 .../rgw-config-modal.component.spec.ts | 38 + .../rgw-config-modal/rgw-config-modal.component.ts | 136 + .../rgw-daemon-details.component.html | 41 + .../rgw-daemon-details.component.scss | 0 .../rgw-daemon-details.component.spec.ts | 42 + .../rgw-daemon-details.component.ts | 46 + .../rgw-daemon-list/rgw-daemon-list.component.html | 52 + .../rgw-daemon-list/rgw-daemon-list.component.scss | 0 .../rgw-daemon-list.component.spec.ts | 107 + .../rgw-daemon-list/rgw-daemon-list.component.ts | 87 + .../rgw-multisite-details.component.html | 121 + .../rgw-multisite-details.component.scss | 13 + .../rgw-multisite-details.component.spec.ts | 43 + .../rgw-multisite-details.component.ts | 592 + .../rgw-multisite-export.component.html | 65 + .../rgw-multisite-export.component.scss | 0 .../rgw-multisite-export.component.spec.ts | 37 + .../rgw-multisite-export.component.ts | 62 + .../rgw-multisite-import.component.html | 182 + .../rgw-multisite-import.component.scss | 0 .../rgw-multisite-import.component.spec.ts | 37 + .../rgw-multisite-import.component.ts | 164 + .../rgw-multisite-migrate.component.html | 154 + .../rgw-multisite-migrate.component.scss | 0 .../rgw-multisite-migrate.component.spec.ts | 37 + .../rgw-multisite-migrate.component.ts | 194 + .../rgw-multisite-realm-form.component.html | 58 + .../rgw-multisite-realm-form.component.scss | 0 .../rgw-multisite-realm-form.component.spec.ts | 94 + .../rgw-multisite-realm-form.component.ts | 131 + .../rgw-multisite-zone-form.component.html | 283 + .../rgw-multisite-zone-form.component.scss | 0 .../rgw-multisite-zone-form.component.spec.ts | 37 + .../rgw-multisite-zone-form.component.ts | 328 + .../rgw-multisite-zonegroup-form.component.html | 205 + .../rgw-multisite-zonegroup-form.component.scss | 0 .../rgw-multisite-zonegroup-form.component.spec.ts | 102 + .../rgw-multisite-zonegroup-form.component.ts | 313 + .../rgw-overview-card-popover.scss | 20 + .../rgw-overview-dashboard.component.html | 185 + .../rgw-overview-dashboard.component.scss | 32 + .../rgw-overview-dashboard.component.spec.ts | 140 + .../rgw-overview-dashboard.component.ts | 166 + .../rgw-sync-data-info.component.html | 51 + .../rgw-sync-data-info.component.scss | 8 + .../rgw-sync-data-info.component.spec.ts | 25 + .../rgw-sync-data-info.component.ts | 16 + .../rgw-sync-metadata-info.component.html | 59 + .../rgw-sync-metadata-info.component.scss | 8 + .../rgw-sync-metadata-info.component.spec.ts | 25 + .../rgw-sync-metadata-info.component.ts | 16 + .../rgw-sync-primary-zone.component.html | 15 + .../rgw-sync-primary-zone.component.scss | 12 + .../rgw-sync-primary-zone.component.spec.ts | 23 + .../rgw-sync-primary-zone.component.ts | 22 + .../rgw-system-user/rgw-system-user.component.html | 37 + 
.../rgw-system-user/rgw-system-user.component.scss | 0 .../rgw-system-user.component.spec.ts | 37 + .../rgw-system-user/rgw-system-user.component.ts | 50 + .../rgw-user-capability-modal.component.html | 70 + .../rgw-user-capability-modal.component.scss | 0 .../rgw-user-capability-modal.component.spec.ts | 30 + .../rgw-user-capability-modal.component.ts | 92 + .../rgw-user-details.component.html | 165 + .../rgw-user-details.component.scss | 0 .../rgw-user-details.component.spec.ts | 69 + .../rgw-user-details/rgw-user-details.component.ts | 120 + .../rgw/rgw-user-form/rgw-user-form.component.html | 656 + .../rgw/rgw-user-form/rgw-user-form.component.scss | 0 .../rgw-user-form/rgw-user-form.component.spec.ts | 339 + .../rgw/rgw-user-form/rgw-user-form.component.ts | 756 + .../rgw/rgw-user-list/rgw-user-list.component.html | 46 + .../rgw/rgw-user-list/rgw-user-list.component.scss | 0 .../rgw-user-list/rgw-user-list.component.spec.ts | 166 + .../rgw/rgw-user-list/rgw-user-list.component.ts | 180 + .../rgw-user-s3-key-modal.component.html | 121 + .../rgw-user-s3-key-modal.component.scss | 0 .../rgw-user-s3-key-modal.component.spec.ts | 30 + .../rgw-user-s3-key-modal.component.ts | 84 + .../rgw-user-subuser-modal.component.html | 126 + .../rgw-user-subuser-modal.component.scss | 0 .../rgw-user-subuser-modal.component.spec.ts | 71 + .../rgw-user-subuser-modal.component.ts | 130 + .../rgw-user-swift-key-modal.component.html | 52 + .../rgw-user-swift-key-modal.component.scss | 0 .../rgw-user-swift-key-modal.component.spec.ts | 31 + .../rgw-user-swift-key-modal.component.ts | 30 + .../rgw/rgw-user-tabs/rgw-user-tabs.component.html | 18 + .../rgw/rgw-user-tabs/rgw-user-tabs.component.scss | 0 .../rgw-user-tabs/rgw-user-tabs.component.spec.ts | 23 + .../rgw/rgw-user-tabs/rgw-user-tabs.component.ts | 8 + .../frontend/src/app/ceph/rgw/rgw.module.ts | 193 + .../src/app/ceph/shared/ceph-shared.module.ts | 17 + .../shared/device-list/device-list.component.html | 53 + .../shared/device-list/device-list.component.scss | 0 .../device-list/device-list.component.spec.ts | 26 + .../shared/device-list/device-list.component.ts | 89 + .../ceph/shared/feedback/feedback.component.html | 120 + .../ceph/shared/feedback/feedback.component.scss | 0 .../shared/feedback/feedback.component.spec.ts | 73 + .../app/ceph/shared/feedback/feedback.component.ts | 109 + .../src/app/ceph/shared/pg-category.model.ts | 71 + .../app/ceph/shared/pg-category.service.spec.ts | 56 + .../src/app/ceph/shared/pg-category.service.ts | 63 + .../smart_data_version_1_0_ata_response.json | 570 + .../smart_data_version_1_0_nvme_response.json | 134 + .../smart_data_version_1_0_scsi_response.json | 208 + .../shared/smart-list/smart-list.component.html | 110 + .../shared/smart-list/smart-list.component.scss | 0 .../shared/smart-list/smart-list.component.spec.ts | 264 + .../ceph/shared/smart-list/smart-list.component.ts | 212 + .../frontend/src/app/core/auth/auth.module.ts | 87 + .../login-password-form.component.html | 89 + .../login-password-form.component.scss | 73 + .../login-password-form.component.spec.ts | 77 + .../login-password-form.component.ts | 51 + .../src/app/core/auth/login/login.component.html | 66 + .../src/app/core/auth/login/login.component.scss | 54 + .../app/core/auth/login/login.component.spec.ts | 58 + .../src/app/core/auth/login/login.component.ts | 76 + .../auth/role-details/role-details.component.html | 11 + .../auth/role-details/role-details.component.scss | 9 + .../role-details/role-details.component.spec.ts | 67 + 
.../auth/role-details/role-details.component.ts | 79 + .../app/core/auth/role-form/role-form-mode.enum.ts | 3 + .../core/auth/role-form/role-form.component.html | 75 + .../core/auth/role-form/role-form.component.scss | 4 + .../auth/role-form/role-form.component.spec.ts | 163 + .../app/core/auth/role-form/role-form.component.ts | 186 + .../src/app/core/auth/role-form/role-form.model.ts | 5 + .../core/auth/role-list/role-list.component.html | 21 + .../core/auth/role-list/role-list.component.scss | 0 .../auth/role-list/role-list.component.spec.ts | 83 + .../app/core/auth/role-list/role-list.component.ts | 169 + .../app/core/auth/user-form/user-form-mode.enum.ts | 3 + .../core/auth/user-form/user-form-role.model.ts | 14 + .../core/auth/user-form/user-form.component.html | 257 + .../core/auth/user-form/user-form.component.scss | 0 .../auth/user-form/user-form.component.spec.ts | 258 + .../app/core/auth/user-form/user-form.component.ts | 305 + .../src/app/core/auth/user-form/user-form.model.ts | 10 + .../core/auth/user-list/user-list.component.html | 46 + .../core/auth/user-list/user-list.component.scss | 16 + .../auth/user-list/user-list.component.spec.ts | 97 + .../app/core/auth/user-list/user-list.component.ts | 226 + .../user-password-form.component.html | 115 + .../user-password-form.component.scss | 6 + .../user-password-form.component.spec.ts | 83 + .../user-password-form.component.ts | 119 + .../core/auth/user-tabs/user-tabs.component.html | 18 + .../core/auth/user-tabs/user-tabs.component.scss | 0 .../auth/user-tabs/user-tabs.component.spec.ts | 29 + .../app/core/auth/user-tabs/user-tabs.component.ts | 13 + .../src/app/core/context/context.component.html | 27 + .../src/app/core/context/context.component.scss | 5 + .../src/app/core/context/context.component.spec.ts | 100 + .../src/app/core/context/context.component.ts | 79 + .../dashboard/frontend/src/app/core/core.module.ts | 34 + .../src/app/core/error/error.component.html | 67 + .../src/app/core/error/error.component.scss | 18 + .../src/app/core/error/error.component.spec.ts | 49 + .../frontend/src/app/core/error/error.component.ts | 102 + .../dashboard/frontend/src/app/core/error/error.ts | 27 + .../blank-layout/blank-layout.component.html | 1 + .../blank-layout/blank-layout.component.scss | 0 .../blank-layout/blank-layout.component.spec.ts | 25 + .../layouts/blank-layout/blank-layout.component.ts | 8 + .../login-layout/login-layout.component.html | 34 + .../login-layout/login-layout.component.scss | 61 + .../login-layout/login-layout.component.spec.ts | 28 + .../layouts/login-layout/login-layout.component.ts | 14 + .../workbench-layout.component.html | 10 + .../workbench-layout.component.scss | 16 + .../workbench-layout.component.spec.ts | 35 + .../workbench-layout/workbench-layout.component.ts | 35 + .../app/core/navigation/about/about.component.html | 46 + .../app/core/navigation/about/about.component.scss | 43 + .../core/navigation/about/about.component.spec.ts | 60 + .../app/core/navigation/about/about.component.ts | 70 + .../administration/administration.component.html | 23 + .../administration/administration.component.scss | 0 .../administration.component.spec.ts | 25 + .../administration/administration.component.ts | 22 + .../navigation/api-docs/api-docs.component.html | 3 + .../navigation/api-docs/api-docs.component.scss | 7 + .../core/navigation/api-docs/api-docs.component.ts | 18 + .../breadcrumbs/breadcrumbs.component.html | 11 + .../breadcrumbs/breadcrumbs.component.scss | 12 + 
.../breadcrumbs/breadcrumbs.component.spec.ts | 171 + .../breadcrumbs/breadcrumbs.component.ts | 157 + .../dashboard-help/dashboard-help.component.html | 29 + .../dashboard-help/dashboard-help.component.scss | 0 .../dashboard-help.component.spec.ts | 27 + .../dashboard-help/dashboard-help.component.ts | 37 + .../navigation/identity/identity.component.html | 28 + .../navigation/identity/identity.component.scss | 0 .../navigation/identity/identity.component.spec.ts | 27 + .../core/navigation/identity/identity.component.ts | 27 + .../src/app/core/navigation/navigation.module.ts | 43 + .../navigation/navigation.component.html | 302 + .../navigation/navigation.component.scss | 268 + .../navigation/navigation.component.spec.ts | 269 + .../navigation/navigation/navigation.component.ts | 123 + .../notifications/notifications.component.html | 11 + .../notifications/notifications.component.scss | 27 + .../notifications/notifications.component.spec.ts | 58 + .../notifications/notifications.component.ts | 47 + .../frontend/src/app/shared/api/api-client.spec.ts | 11 + .../frontend/src/app/shared/api/api-client.ts | 5 + .../src/app/shared/api/auth.service.spec.ts | 57 + .../frontend/src/app/shared/api/auth.service.ts | 53 + .../src/app/shared/api/ceph-service.service.ts | 74 + .../src/app/shared/api/ceph-user.service.ts | 13 + .../api/cephfs-subvolume-group.service.spec.ts | 23 + .../shared/api/cephfs-subvolume-group.service.ts | 79 + .../shared/api/cephfs-subvolume.service.spec.ts | 43 + .../src/app/shared/api/cephfs-subvolume.service.ts | 96 + .../src/app/shared/api/cephfs.service.spec.ts | 114 + .../frontend/src/app/shared/api/cephfs.service.ts | 106 + .../src/app/shared/api/cluster.service.spec.ts | 42 + .../frontend/src/app/shared/api/cluster.service.ts | 27 + .../app/shared/api/configuration.service.spec.ts | 99 + .../src/app/shared/api/configuration.service.ts | 59 + .../src/app/shared/api/crush-rule.service.spec.ts | 47 + .../src/app/shared/api/crush-rule.service.ts | 32 + .../shared/api/custom-login-banner.service.spec.ts | 35 + .../app/shared/api/custom-login-banner.service.ts | 15 + .../src/app/shared/api/daemon.service.spec.ts | 39 + .../frontend/src/app/shared/api/daemon.service.ts | 36 + .../api/erasure-code-profile.service.spec.ts | 55 + .../app/shared/api/erasure-code-profile.service.ts | 110 + .../src/app/shared/api/feedback.service.spec.ts | 47 + .../src/app/shared/api/feedback.service.ts | 38 + .../src/app/shared/api/health.service.spec.ts | 40 + .../frontend/src/app/shared/api/health.service.ts | 29 + .../src/app/shared/api/host.service.spec.ts | 94 + .../frontend/src/app/shared/api/host.service.ts | 165 + .../src/app/shared/api/iscsi.service.spec.ts | 97 + .../frontend/src/app/shared/api/iscsi.service.ts | 60 + .../src/app/shared/api/logging.service.spec.ts | 39 + .../frontend/src/app/shared/api/logging.service.ts | 18 + .../src/app/shared/api/logs.service.spec.ts | 34 + .../frontend/src/app/shared/api/logs.service.ts | 17 + .../src/app/shared/api/mgr-module.service.spec.ts | 66 + .../src/app/shared/api/mgr-module.service.ts | 65 + .../src/app/shared/api/monitor.service.spec.ts | 34 + .../frontend/src/app/shared/api/monitor.service.ts | 13 + .../src/app/shared/api/motd.service.spec.ts | 34 + .../frontend/src/app/shared/api/motd.service.ts | 25 + .../src/app/shared/api/nfs.service.spec.ts | 74 + .../frontend/src/app/shared/api/nfs.service.ts | 108 + .../app/shared/api/orchestrator.service.spec.ts | 35 + .../src/app/shared/api/orchestrator.service.ts | 50 + 
.../src/app/shared/api/osd.service.spec.ts | 183 + .../frontend/src/app/shared/api/osd.service.ts | 190 + .../frontend/src/app/shared/api/paginate.model.ts | 16 + .../shared/api/performance-counter.service.spec.ts | 45 + .../app/shared/api/performance-counter.service.ts | 29 + .../src/app/shared/api/pool.service.spec.ts | 123 + .../frontend/src/app/shared/api/pool.service.ts | 74 + .../src/app/shared/api/prometheus.service.spec.ts | 247 + .../src/app/shared/api/prometheus.service.ts | 192 + .../app/shared/api/rbd-mirroring.service.spec.ts | 164 + .../src/app/shared/api/rbd-mirroring.service.ts | 118 + .../frontend/src/app/shared/api/rbd.model.ts | 30 + .../src/app/shared/api/rbd.service.spec.ts | 186 + .../frontend/src/app/shared/api/rbd.service.ts | 203 + .../src/app/shared/api/rgw-bucket.service.spec.ts | 126 + .../src/app/shared/api/rgw-bucket.service.ts | 199 + .../src/app/shared/api/rgw-daemon.service.spec.ts | 90 + .../src/app/shared/api/rgw-daemon.service.ts | 93 + .../src/app/shared/api/rgw-multisite.service.ts | 32 + .../src/app/shared/api/rgw-realm.service.spec.ts | 22 + .../src/app/shared/api/rgw-realm.service.ts | 84 + .../src/app/shared/api/rgw-site.service.spec.ts | 43 + .../src/app/shared/api/rgw-site.service.ts | 38 + .../src/app/shared/api/rgw-user.service.spec.ts | 170 + .../src/app/shared/api/rgw-user.service.ts | 179 + .../src/app/shared/api/rgw-zone.service.spec.ts | 22 + .../src/app/shared/api/rgw-zone.service.ts | 168 + .../app/shared/api/rgw-zonegroup.service.spec.ts | 22 + .../src/app/shared/api/rgw-zonegroup.service.ts | 93 + .../src/app/shared/api/role.service.spec.ts | 75 + .../frontend/src/app/shared/api/role.service.ts | 49 + .../src/app/shared/api/scope.service.spec.ts | 34 + .../frontend/src/app/shared/api/scope.service.ts | 13 + .../src/app/shared/api/settings.service.spec.ts | 154 + .../src/app/shared/api/settings.service.ts | 77 + .../src/app/shared/api/telemetry.service.spec.ts | 58 + .../src/app/shared/api/telemetry.service.ts | 23 + .../src/app/shared/api/upgrade.service.spec.ts | 67 + .../frontend/src/app/shared/api/upgrade.service.ts | 78 + .../src/app/shared/api/user.service.spec.ts | 104 + .../frontend/src/app/shared/api/user.service.ts | 62 + .../src/app/shared/classes/cd-helper.class.spec.ts | 66 + .../src/app/shared/classes/cd-helper.class.ts | 28 + .../classes/crush.node.selection.class.spec.ts | 220 + .../shared/classes/crush.node.selection.class.ts | 221 + .../frontend/src/app/shared/classes/css-helper.ts | 5 + .../app/shared/classes/list-with-details.class.ts | 29 + .../shared/classes/table-status-view-cache.spec.ts | 40 + .../app/shared/classes/table-status-view-cache.ts | 37 + .../src/app/shared/classes/table-status.spec.ts | 15 + .../src/app/shared/classes/table-status.ts | 3 + .../alert-panel/alert-panel.component.html | 43 + .../alert-panel/alert-panel.component.scss | 12 + .../alert-panel/alert-panel.component.spec.ts | 26 + .../alert-panel/alert-panel.component.ts | 72 + .../back-button/back-button.component.html | 6 + .../back-button/back-button.component.scss | 0 .../back-button/back-button.component.spec.ts | 25 + .../back-button/back-button.component.ts | 28 + .../components/card-row/card-row.component.html | 171 + .../components/card-row/card-row.component.scss | 4 + .../components/card-row/card-row.component.spec.ts | 22 + .../components/card-row/card-row.component.ts | 34 + .../app/shared/components/card/card.component.html | 24 + .../app/shared/components/card/card.component.scss | 0 
.../shared/components/card/card.component.spec.ts | 33 +
.../app/shared/components/card/card.component.ts | 28 +
.../components/cd-label/cd-label.component.html | 12 +
.../components/cd-label/cd-label.component.scss | 0
.../components/cd-label/cd-label.component.spec.ts | 24 +
.../components/cd-label/cd-label.component.ts | 12 +
.../cd-label/color-class-from-text.pipe.ts | 28 +
.../src/app/shared/components/components.module.ts | 143 +
.../config-option/config-option.component.html | 75 +
.../config-option/config-option.component.scss | 10 +
.../config-option/config-option.component.spec.ts | 295 +
.../config-option/config-option.component.ts | 120 +
.../config-option/config-option.model.ts | 12 +
.../config-option/config-option.types.spec.ts | 272 +
.../config-option/config-option.types.ts | 147 +
.../confirmation-modal.component.html | 28 +
.../confirmation-modal.component.scss | 0
.../confirmation-modal.component.spec.ts | 185 +
.../confirmation-modal.component.ts | 66 +
.../copy2clipboard-button.component.html | 15 +
.../copy2clipboard-button.component.scss | 0
.../copy2clipboard-button.component.spec.ts | 65 +
.../copy2clipboard-button.component.ts | 58 +
.../critical-confirmation-modal.component.html | 56 +
.../critical-confirmation-modal.component.scss | 11 +
.../critical-confirmation-modal.component.spec.ts | 235 +
.../critical-confirmation-modal.component.ts | 76 +
.../custom-login-banner.component.html | 2 +
.../custom-login-banner.component.scss | 5 +
.../custom-login-banner.component.spec.ts | 25 +
.../custom-login-banner.component.ts | 20 +
.../date-time-picker.component.html | 13 +
.../date-time-picker.component.scss | 0
.../date-time-picker.component.spec.ts | 58 +
.../date-time-picker/date-time-picker.component.ts | 67 +
.../app/shared/components/doc/doc.component.html | 2 +
.../app/shared/components/doc/doc.component.scss | 0
.../shared/components/doc/doc.component.spec.ts | 27 +
.../src/app/shared/components/doc/doc.component.ts | 28 +
.../download-button/download-button.component.html | 23 +
.../download-button/download-button.component.scss | 0
.../download-button.component.spec.ts | 39 +
.../download-button/download-button.component.ts | 31 +
.../form-button-panel.component.html | 12 +
.../form-button-panel.component.scss | 0
.../form-button-panel.component.spec.ts | 25 +
.../form-button-panel.component.ts | 66 +
.../form-modal/form-modal.component.html | 69 +
.../form-modal/form-modal.component.scss | 0
.../form-modal/form-modal.component.spec.ts | 149 +
.../components/form-modal/form-modal.component.ts | 113 +
.../components/grafana/grafana.component.html | 84 +
.../components/grafana/grafana.component.scss | 33 +
.../components/grafana/grafana.component.spec.ts | 105 +
.../shared/components/grafana/grafana.component.ts | 204 +
.../shared/components/helper/helper.component.html | 11 +
.../shared/components/helper/helper.component.scss | 7 +
.../components/helper/helper.component.spec.ts | 26 +
.../shared/components/helper/helper.component.ts | 21 +
.../language-selector.component.html | 22 +
.../language-selector.component.scss | 0
.../language-selector.component.spec.ts | 85 +
.../language-selector.component.ts | 40 +
.../language-selector/supported-languages.enum.ts | 17 +
.../loading-panel/loading-panel.component.html | 9 +
.../loading-panel/loading-panel.component.scss | 0
.../loading-panel/loading-panel.component.spec.ts | 26 +
.../loading-panel/loading-panel.component.ts | 12 +
.../shared/components/modal/modal.component.html | 18 +
.../shared/components/modal/modal.component.scss | 23 +
.../components/modal/modal.component.spec.ts | 54 +
.../app/shared/components/modal/modal.component.ts | 31 +
.../app/shared/components/motd/motd.component.html | 8 +
.../app/shared/components/motd/motd.component.scss | 0
.../shared/components/motd/motd.component.spec.ts | 26 +
.../app/shared/components/motd/motd.component.ts | 33 +
.../notifications-sidebar.component.html | 144 +
.../notifications-sidebar.component.scss | 64 +
.../notifications-sidebar.component.spec.ts | 208 +
.../notifications-sidebar.component.ts | 228 +
.../orchestrator-doc-panel.component.html | 10 +
.../orchestrator-doc-panel.component.scss | 0
.../orchestrator-doc-panel.component.spec.ts | 29 +
.../orchestrator-doc-panel.component.ts | 13 +
.../pwd-expiration-notification.component.html | 16 +
.../pwd-expiration-notification.component.scss | 3 +
.../pwd-expiration-notification.component.spec.ts | 107 +
.../pwd-expiration-notification.component.ts | 55 +
.../refresh-selector.component.html | 19 +
.../refresh-selector.component.scss | 0
.../refresh-selector.component.spec.ts | 27 +
.../refresh-selector/refresh-selector.component.ts | 32 +
.../select-badges/select-badges.component.html | 22 +
.../select-badges/select-badges.component.scss | 9 +
.../select-badges/select-badges.component.spec.ts | 57 +
.../select-badges/select-badges.component.ts | 35 +
.../components/select/select-messages.model.ts | 23 +
.../components/select/select-option.model.ts | 13 +
.../shared/components/select/select.component.html | 79 +
.../shared/components/select/select.component.scss | 26 +
.../components/select/select.component.spec.ts | 276 +
.../shared/components/select/select.component.ts | 149 +
.../components/sparkline/sparkline.component.html | 15 +
.../components/sparkline/sparkline.component.scss | 5 +
.../sparkline/sparkline.component.spec.ts | 52 +
.../components/sparkline/sparkline.component.ts | 130 +
.../submit-button/submit-button.component.html | 11 +
.../submit-button/submit-button.component.scss | 0
.../submit-button/submit-button.component.spec.ts | 27 +
.../submit-button/submit-button.component.ts | 99 +
.../telemetry-notification.component.html | 12 +
.../telemetry-notification.component.scss | 23 +
.../telemetry-notification.component.spec.ts | 107 +
.../telemetry-notification.component.ts | 62 +
.../components/usage-bar/usage-bar.component.html | 45 +
.../components/usage-bar/usage-bar.component.scss | 35 +
.../usage-bar/usage-bar.component.spec.ts | 27 +
.../components/usage-bar/usage-bar.component.ts | 53 +
.../shared/components/wizard/wizard.component.html | 19 +
.../shared/components/wizard/wizard.component.scss | 34 +
.../components/wizard/wizard.component.spec.ts | 25 +
.../shared/components/wizard/wizard.component.ts | 39 +
.../src/app/shared/constants/app.constants.ts | 330 +
.../checked-table-form.component.html | 55 +
.../checked-table-form.component.scss | 0
.../checked-table-form.component.spec.ts | 138 +
.../checked-table-form.component.ts | 165 +
.../datatable/crud-table/crud-table.component.html | 76 +
.../datatable/crud-table/crud-table.component.scss | 3 +
.../crud-table/crud-table.component.spec.ts | 53 +
.../datatable/crud-table/crud-table.component.ts | 177 +
.../src/app/shared/datatable/datatable.module.ts | 95 +
.../table-actions/table-actions.component.html | 45 +
.../table-actions/table-actions.component.scss | 19 +
.../table-actions/table-actions.component.spec.ts | 213 +
.../table-actions/table-actions.component.ts | 161 +
.../table-key-value/table-key-value.component.html | 14 +
.../table-key-value/table-key-value.component.scss | 5 +
.../table-key-value.component.spec.ts | 352 +
.../table-key-value/table-key-value.component.ts | 224 +
.../table-pagination.component.html | 58 +
.../table-pagination.component.scss | 21 +
.../table-pagination.component.spec.ts | 53 +
.../table-pagination/table-pagination.component.ts | 110 +
.../shared/datatable/table/table.component.html | 356 +
.../shared/datatable/table/table.component.scss | 299 +
.../shared/datatable/table/table.component.spec.ts | 782 +
.../app/shared/datatable/table/table.component.ts | 929 +
.../src/app/shared/decorators/cd-encode.spec.ts | 41 +
.../src/app/shared/decorators/cd-encode.ts | 80 +
.../directives/auth-storage.directive.spec.ts | 104 +
.../shared/directives/auth-storage.directive.ts | 48 +
.../shared/directives/autofocus.directive.spec.ts | 90 +
.../app/shared/directives/autofocus.directive.ts | 28 +
.../dimless-binary-per-second.directive.spec.ts | 12 +
.../dimless-binary-per-second.directive.ts | 132 +
.../directives/dimless-binary.directive.spec.ts | 12 +
.../shared/directives/dimless-binary.directive.ts | 128 +
.../src/app/shared/directives/directives.module.ts | 56 +
.../form-input-disable.directive.spec.ts | 75 +
.../directives/form-input-disable.directive.ts | 27 +
.../directives/form-loading.directive.spec.ts | 89 +
.../shared/directives/form-loading.directive.ts | 40 +
.../shared/directives/form-scope.directive.spec.ts | 8 +
.../app/shared/directives/form-scope.directive.ts | 8 +
.../app/shared/directives/iops.directive.spec.ts | 8 +
.../src/app/shared/directives/iops.directive.ts | 31 +
.../directives/milliseconds.directive.spec.ts | 8 +
.../shared/directives/milliseconds.directive.ts | 31 +
.../cd-form-control.directive.spec.ts | 37 +
.../cd-form-control.directive.ts | 82 +
.../cd-form-group.directive.spec.ts | 37 +
.../cd-form-group.directive.ts | 76 +
.../cd-form-validation.directive.spec.ts | 35 +
.../cd-form-validation.directive.ts | 67 +
.../directives/password-button.directive.spec.ts | 8 +
.../shared/directives/password-button.directive.ts | 45 +
.../directives/stateful-tab.directive.spec.ts | 38 +
.../shared/directives/stateful-tab.directive.ts | 34 +
.../app/shared/directives/trim.directive.spec.ts | 50 +
.../src/app/shared/directives/trim.directive.ts | 21 +
.../src/app/shared/enum/cell-template.enum.ts | 64 +
.../src/app/shared/enum/components.enum.ts | 9 +
.../src/app/shared/enum/dashboard-promqls.enum.ts | 18 +
.../src/app/shared/enum/health-color.enum.ts | 5 +
.../src/app/shared/enum/health-icon.enum.ts | 11 +
.../src/app/shared/enum/health-label.enum.ts | 5 +
.../frontend/src/app/shared/enum/icons.enum.ts | 89 +
.../src/app/shared/enum/notification-type.enum.ts | 5 +
.../src/app/shared/enum/unix_errno.enum.ts | 4 +
.../src/app/shared/enum/view-cache-status.enum.ts | 6 +
.../src/app/shared/forms/cd-form-builder.spec.ts | 33 +
.../src/app/shared/forms/cd-form-builder.ts | 20 +
.../src/app/shared/forms/cd-form-group.spec.ts | 184 +
.../frontend/src/app/shared/forms/cd-form-group.ts | 75 +
.../frontend/src/app/shared/forms/cd-form.spec.ts | 32 +
.../frontend/src/app/shared/forms/cd-form.ts | 26 +
.../src/app/shared/forms/cd-validators.spec.ts | 906 +
.../frontend/src/app/shared/forms/cd-validators.ts | 613 +
.../forms/crud-form/crud-form.component.html | 25 +
.../forms/crud-form/crud-form.component.scss | 22 +
.../forms/crud-form/crud-form.component.spec.ts | 40 +
.../shared/forms/crud-form/crud-form.component.ts | 104 +
.../app/shared/forms/crud-form/crud-form.model.ts | 15 +
.../formly-array-type.component.html | 42 +
.../formly-array-type.component.scss | 3 +
.../formly-array-type.component.spec.ts | 45 +
.../formly-array-type.component.ts | 34 +
.../formly-file-type/formly-file-type-accessor.ts | 29 +
.../formly-file-type.component.html | 4 +
.../formly-file-type.component.scss | 0
.../formly-file-type.component.spec.ts | 39 +
.../formly-file-type/formly-file-type.component.ts | 9 +
.../formly-input-type.component.html | 3 +
.../formly-input-type.component.scss | 0
.../formly-input-type.component.spec.ts | 46 +
.../formly-input-type.component.ts | 9 +
.../formly-input-wrapper.component.html | 37 +
.../formly-input-wrapper.component.scss | 0
.../formly-input-wrapper.component.spec.ts | 46 +
.../formly-input-wrapper.component.ts | 15 +
.../formly-object-type.component.html | 17 +
.../formly-object-type.component.scss | 0
.../formly-object-type.component.spec.ts | 46 +
.../formly-object-type.component.ts | 22 +
.../formly-textarea-type.component.html | 9 +
.../formly-textarea-type.component.scss | 0
.../formly-textarea-type.component.spec.ts | 47 +
.../formly-textarea-type.component.ts | 25 +
.../src/app/shared/forms/crud-form/helpers.ts | 40 +
.../forms/crud-form/validators/file-validator.ts | 15 +
.../forms/crud-form/validators/json-validator.ts | 12 +
.../crud-form/validators/rgw-role-validator.ts | 19 +
.../src/app/shared/helpers/helpers.module.ts | 8 +
.../app/shared/helpers/prometheus-list-helper.ts | 24 +
.../src/app/shared/models/alertmanager-silence.ts | 26 +
.../frontend/src/app/shared/models/breadcrumbs.ts | 59 +
.../frontend/src/app/shared/models/cd-details.ts | 5 +
.../shared/models/cd-form-modal-field-config.ts | 32 +
.../src/app/shared/models/cd-notification.spec.ts | 95 +
.../src/app/shared/models/cd-notification.ts | 50 +
.../shared/models/cd-pwd-expiration-settings.ts | 11 +
.../app/shared/models/cd-pwd-policy-settings.ts | 23 +
.../src/app/shared/models/cd-table-action.ts | 44 +
.../app/shared/models/cd-table-column-filter.ts | 7 +
.../models/cd-table-column-filters-change.ts | 22 +
.../src/app/shared/models/cd-table-column.ts | 38 +
.../shared/models/cd-table-fetch-data-context.ts | 51 +
.../src/app/shared/models/cd-table-paging.ts | 20 +
.../src/app/shared/models/cd-table-selection.ts | 45 +
.../src/app/shared/models/cd-user-config.ts | 11 +
.../app/shared/models/cephfs-directory-models.ts | 21 +
.../shared/models/cephfs-subvolume-group.model.ts | 13 +
.../app/shared/models/cephfs-subvolume.model.ts | 18 +
.../shared/models/cephfs-subvolumegroup.model.ts | 13 +
.../src/app/shared/models/chart-tooltip.ts | 115 +
.../src/app/shared/models/configuration.ts | 43 +
.../frontend/src/app/shared/models/credentials.ts | 4 +
.../src/app/shared/models/crud-table-metadata.ts | 17 +
.../frontend/src/app/shared/models/crush-node.ts | 17 +
.../frontend/src/app/shared/models/crush-rule.ts | 16 +
.../frontend/src/app/shared/models/crush-step.ts | 7 +
.../src/app/shared/models/daemon.interface.ts | 12 +
.../frontend/src/app/shared/models/devices.ts | 25 +
.../src/app/shared/models/erasure-code-profile.ts | 17 +
.../src/app/shared/models/executing-task.ts | 6 +
.../src/app/shared/models/finished-task.ts | 15 +
.../frontend/src/app/shared/models/flag.ts | 8 +
.../frontend/src/app/shared/models/image-spec.ts | 25 +
.../shared/models/inventory-device-type.model.ts | 9 +
.../src/app/shared/models/login-response.ts | 7 +
.../src/app/shared/models/mirroring-summary.ts | 5 +
.../src/app/shared/models/orchestrator.enum.ts | 25 +
.../app/shared/models/orchestrator.interface.ts | 9 +
.../app/shared/models/osd-deployment-options.ts | 24 +
.../frontend/src/app/shared/models/osd-settings.ts | 4 +
.../src/app/shared/models/permission.spec.ts | 62 +
.../frontend/src/app/shared/models/permissions.ts | 50 +
.../src/app/shared/models/pool-form-info.ts | 20 +
.../src/app/shared/models/prometheus-alerts.ts | 85 +
.../src/app/shared/models/service.interface.ts | 50 +
.../frontend/src/app/shared/models/smart.ts | 253 +
.../src/app/shared/models/summary.model.ts | 15 +
.../src/app/shared/models/task-exception.ts | 9 +
.../frontend/src/app/shared/models/task.ts | 10 +
.../src/app/shared/models/upgrade.interface.ts | 15 +
.../frontend/src/app/shared/models/wizard-steps.ts | 4 +
.../src/app/shared/pipes/array.pipe.spec.ts | 21 +
.../frontend/src/app/shared/pipes/array.pipe.ts | 26 +
.../src/app/shared/pipes/boolean-text.pipe.spec.ts | 37 +
.../src/app/shared/pipes/boolean-text.pipe.ts | 14 +
.../src/app/shared/pipes/boolean.pipe.spec.ts | 57 +
.../frontend/src/app/shared/pipes/boolean.pipe.ts | 26 +
.../src/app/shared/pipes/cd-date.pipe.spec.ts | 24 +
.../frontend/src/app/shared/pipes/cd-date.pipe.ts | 20 +
.../shared/pipes/ceph-release-name.pipe.spec.ts | 28 +
.../src/app/shared/pipes/ceph-release-name.pipe.ts | 24 +
.../shared/pipes/ceph-short-version.pipe.spec.ts | 21 +
.../app/shared/pipes/ceph-short-version.pipe.ts | 18 +
.../shared/pipes/dimless-binary-per-second.pipe.ts | 19 +
.../app/shared/pipes/dimless-binary.pipe.spec.ts | 56 +
.../src/app/shared/pipes/dimless-binary.pipe.ts | 19 +
.../src/app/shared/pipes/dimless.pipe.spec.ts | 56 +
.../frontend/src/app/shared/pipes/dimless.pipe.ts | 19 +
.../src/app/shared/pipes/duration.pipe.spec.ts | 17 +
.../frontend/src/app/shared/pipes/duration.pipe.ts | 40 +
.../src/app/shared/pipes/empty.pipe.spec.ts | 18 +
.../frontend/src/app/shared/pipes/empty.pipe.ts | 17 +
.../src/app/shared/pipes/encode-uri.pipe.spec.ts | 13 +
.../src/app/shared/pipes/encode-uri.pipe.ts | 10 +
.../src/app/shared/pipes/filter.pipe.spec.ts | 54 +
.../frontend/src/app/shared/pipes/filter.pipe.ts | 25 +
.../src/app/shared/pipes/health-color.pipe.spec.ts | 47 +
.../src/app/shared/pipes/health-color.pipe.ts | 17 +
.../src/app/shared/pipes/health-icon.pipe.spec.ts | 20 +
.../src/app/shared/pipes/health-icon.pipe.ts | 12 +
.../src/app/shared/pipes/health-label.pipe.spec.ts | 24 +
.../src/app/shared/pipes/health-label.pipe.ts | 12 +
.../src/app/shared/pipes/iops.pipe.spec.ts | 8 +
.../frontend/src/app/shared/pipes/iops.pipe.ts | 10 +
.../app/shared/pipes/iscsi-backstore.pipe.spec.ts | 17 +
.../src/app/shared/pipes/iscsi-backstore.pipe.ts | 15 +
.../src/app/shared/pipes/join.pipe.spec.ts | 13 +
.../frontend/src/app/shared/pipes/join.pipe.ts | 10 +
.../src/app/shared/pipes/log-priority.pipe.spec.ts | 32 +
.../src/app/shared/pipes/log-priority.pipe.ts | 20 +
.../frontend/src/app/shared/pipes/map.pipe.spec.ts | 25 +
.../frontend/src/app/shared/pipes/map.pipe.ts | 15 +
.../src/app/shared/pipes/mds-summary.pipe.spec.ts | 76 +
.../src/app/shared/pipes/mds-summary.pipe.ts | 55 +
.../src/app/shared/pipes/mgr-summary.pipe.spec.ts | 38 +
.../src/app/shared/pipes/mgr-summary.pipe.ts | 37 +
.../src/app/shared/pipes/milliseconds.pipe.spec.ts | 8 +
.../src/app/shared/pipes/milliseconds.pipe.ts | 10 +
.../app/shared/pipes/not-available.pipe.spec.ts | 30 +
.../src/app/shared/pipes/not-available.pipe.ts | 15 +
.../pipes/octal-to-human-readable.pipe.spec.ts | 32 +
.../shared/pipes/octal-to-human-readable.pipe.ts | 96 +
.../src/app/shared/pipes/ordinal.pipe.spec.ts | 8 +
.../frontend/src/app/shared/pipes/ordinal.pipe.ts | 25 +
.../src/app/shared/pipes/osd-summary.pipe.spec.ts | 43 +
.../src/app/shared/pipes/osd-summary.pipe.ts | 46 +
.../src/app/shared/pipes/path.pipe.spec.ts | 18 +
.../frontend/src/app/shared/pipes/path.pipe.ts | 17 +
.../frontend/src/app/shared/pipes/pipes.module.ts | 152 +
.../pipes/rbd-configuration-source.pipe.spec.ts | 22 +
.../shared/pipes/rbd-configuration-source.pipe.ts | 15 +
.../app/shared/pipes/relative-date.pipe.spec.ts | 44 +
.../src/app/shared/pipes/relative-date.pipe.ts | 58 +
.../src/app/shared/pipes/round.pipe.spec.ts | 13 +
.../frontend/src/app/shared/pipes/round.pipe.ts | 12 +
.../app/shared/pipes/sanitize-html.pipe.spec.ts | 26 +
.../src/app/shared/pipes/sanitize-html.pipe.ts | 13 +
.../app/shared/pipes/search-highlight.pipe.spec.ts | 41 +
.../src/app/shared/pipes/search-highlight.pipe.ts | 26 +
.../src/app/shared/pipes/truncate.pipe.spec.ts | 21 +
.../frontend/src/app/shared/pipes/truncate.pipe.ts | 16 +
.../src/app/shared/pipes/upper-first.pipe.spec.ts | 17 +
.../src/app/shared/pipes/upper-first.pipe.ts | 12 +
.../rxjs/operators/page-visibilty.operator.ts | 20 +
.../services/api-interceptor.service.spec.ts | 227 +
.../app/shared/services/api-interceptor.service.ts | 133 +
.../app/shared/services/auth-guard.service.spec.ts | 54 +
.../src/app/shared/services/auth-guard.service.ts | 29 +
.../shared/services/auth-storage.service.spec.ts | 47 +
.../app/shared/services/auth-storage.service.ts | 59 +
.../services/cd-table-server-side.service.spec.ts | 16 +
.../services/cd-table-server-side.service.ts | 14 +
.../services/change-password-guard.service.spec.ts | 68 +
.../services/change-password-guard.service.ts | 42 +
.../services/crud-form-adapter.service.spec.ts | 19 +
.../shared/services/crud-form-adapter.service.ts | 42 +
.../shared/services/data-gateway.service.spec.ts | 20 +
.../app/shared/services/data-gateway.service.ts | 90 +
.../src/app/shared/services/device.service.spec.ts | 92 +
.../src/app/shared/services/device.service.ts | 57 +
.../src/app/shared/services/doc.service.spec.ts | 75 +
.../src/app/shared/services/doc.service.ts | 68 +
.../app/shared/services/favicon.service.spec.ts | 23 +
.../src/app/shared/services/favicon.service.ts | 79 +
.../services/feature-toggles-guard.service.spec.ts | 72 +
.../services/feature-toggles-guard.service.ts | 30 +
.../services/feature-toggles.service.spec.ts | 54 +
.../app/shared/services/feature-toggles.service.ts | 38 +
.../app/shared/services/formatter.service.spec.ts | 112 +
.../src/app/shared/services/formatter.service.ts | 141 +
.../shared/services/js-error-handler.service.ts | 33 +
.../app/shared/services/language.service.spec.ts | 34 +
.../src/app/shared/services/language.service.ts | 23 +
.../src/app/shared/services/modal.service.spec.ts | 59 +
.../src/app/shared/services/modal.service.ts | 28 +
.../services/module-status-guard.service.spec.ts | 102 +
.../shared/services/module-status-guard.service.ts | 104 +
.../services/motd-notification.service.spec.ts | 117 +
.../shared/services/motd-notification.service.ts | 84 +
.../shared/services/ngzone-scheduler.service.ts | 48 +
.../shared/services/no-sso-guard.service.spec.ts | 49 +
.../app/shared/services/no-sso-guard.service.ts | 28 +
.../shared/services/notification.service.spec.ts | 285 +
.../app/shared/services/notification.service.ts | 237 +
.../services/number-formatter.service.spec.ts | 16 +
.../shared/services/number-formatter.service.ts | 68 +
.../services/password-policy.service.spec.ts | 208 +
.../app/shared/services/password-policy.service.ts | 65 +
.../services/prometheus-alert-formatter.spec.ts | 95 +
.../shared/services/prometheus-alert-formatter.ts | 74 +
.../services/prometheus-alert.service.spec.ts | 214 +
.../shared/services/prometheus-alert.service.ts | 116 +
.../prometheus-notification.service.spec.ts | 227 +
.../services/prometheus-notification.service.ts | 51 +
.../prometheus-silence-matcher.service.spec.ts | 133 +
.../services/prometheus-silence-matcher.service.ts | 75 +
.../services/rbd-configuration.service.spec.ts | 45 +
.../shared/services/rbd-configuration.service.ts | 144 +
.../services/refresh-interval.service.spec.ts | 52 +
.../shared/services/refresh-interval.service.ts | 46 +
.../app/shared/services/summary.service.spec.ts | 179 +
.../src/app/shared/services/summary.service.ts | 89 +
.../app/shared/services/task-list.service.spec.ts | 133 +
.../src/app/shared/services/task-list.service.ts | 111 +
.../shared/services/task-manager.service.spec.ts | 72 +
.../app/shared/services/task-manager.service.ts | 59 +
.../shared/services/task-message.service.spec.ts | 312 +
.../app/shared/services/task-message.service.ts | 491 +
.../shared/services/task-wrapper.service.spec.ts | 98 +
.../app/shared/services/task-wrapper.service.ts | 68 +
.../telemetry-notification.service.spec.ts | 33 +
.../services/telemetry-notification.service.ts | 16 +
.../services/text-to-download.service.spec.ts | 20 +
.../shared/services/text-to-download.service.ts | 12 +
.../app/shared/services/time-diff.service.spec.ts | 71 +
.../src/app/shared/services/time-diff.service.ts | 55 +
.../src/app/shared/services/timer.service.spec.ts | 68 +
.../src/app/shared/services/timer.service.ts | 29 +
.../shared/services/url-builder.service.spec.ts | 37 +
.../src/app/shared/services/url-builder.service.ts | 50 +
.../shared/services/wizard-steps.service.spec.ts | 16 +
.../app/shared/services/wizard-steps.service.ts | 58 +
.../frontend/src/app/shared/shared.module.ts | 43 +
.../mgr/dashboard/frontend/src/assets/.gitkeep | 0
.../assets/Ceph_Ceph_Logo_with_text_red_white.svg | 69 +
.../src/assets/Ceph_Ceph_Logo_with_text_white.svg | 69 +
.../dashboard/frontend/src/assets/Ceph_Logo.svg | 71 +
.../frontend/src/assets/ceph_background.gif | Bin 0 -> 98115 bytes
.../mgr/dashboard/frontend/src/assets/loading.gif | Bin 0 -> 35386 bytes
.../dashboard/frontend/src/assets/logo-mini.png | Bin 0 -> 1811 bytes
.../frontend/src/assets/prometheus_logo.svg | 50 +
.../frontend/src/environments/environment.tpl.ts | 10 +
src/pybind/mgr/dashboard/frontend/src/favicon.ico | Bin 0 -> 1150 bytes
src/pybind/mgr/dashboard/frontend/src/index.html | 24 +
.../mgr/dashboard/frontend/src/jestGlobalMocks.ts | 7 +
.../dashboard/frontend/src/locale/messages.cs.xlf | 6593 ++++
.../frontend/src/locale/messages.de-DE.xlf | 6595 ++++
.../frontend/src/locale/messages.es-ES.xlf | 6595 ++++
.../frontend/src/locale/messages.fr-FR.xlf | 6595 ++++
.../frontend/src/locale/messages.id-ID.xlf | 6595 ++++
.../frontend/src/locale/messages.it-IT.xlf | 6595 ++++
.../frontend/src/locale/messages.ja-JP.xlf | 6595 ++++
.../frontend/src/locale/messages.ko-KR.xlf | 6596 ++++
.../frontend/src/locale/messages.pl-PL.xlf | 6595 ++++
.../frontend/src/locale/messages.pt-BR.xlf | 6595 ++++
.../frontend/src/locale/messages.zh-CN.xlf | 6595 ++++
.../frontend/src/locale/messages.zh-TW.xlf | 6595 ++++
src/pybind/mgr/dashboard/frontend/src/main.ts | 23 +
src/pybind/mgr/dashboard/frontend/src/polyfills.ts | 45 +
src/pybind/mgr/dashboard/frontend/src/setupJest.ts | 15 +
src/pybind/mgr/dashboard/frontend/src/styles.scss | 241 +
.../frontend/src/styles/_chart-tooltip.scss | 59 +
.../frontend/src/styles/bootstrap-extends.scss | 123 +
.../frontend/src/styles/ceph-custom/_basics.scss | 112 +
.../frontend/src/styles/ceph-custom/_buttons.scss | 100 +
.../frontend/src/styles/ceph-custom/_dropdown.scss | 35 +
.../frontend/src/styles/ceph-custom/_forms.scss | 105 +
.../frontend/src/styles/ceph-custom/_grid.scss | 6 +
.../frontend/src/styles/ceph-custom/_icons.scss | 16 +
.../frontend/src/styles/ceph-custom/_index.scss | 5 +
.../frontend/src/styles/ceph-custom/_navs.scss | 5 +
.../frontend/src/styles/ceph-custom/_toast.scss | 30 +
.../src/styles/defaults/_bootstrap-defaults.scss | 139 +
.../frontend/src/styles/defaults/_functions.scss | 5 +
.../frontend/src/styles/defaults/_index.scss | 4 +
.../frontend/src/styles/defaults/_mixins.scss | 34 +
.../frontend/src/styles/vendor/_index.scss | 22 +
.../src/styles/vendor/_style-overrides.scss | 4 +
.../frontend/src/styles/vendor/_variables.scss | 17 +
.../frontend/src/testing/activated-route-stub.ts | 26 +
.../frontend/src/testing/unit-test-helper.ts | 687 +
src/pybind/mgr/dashboard/frontend/src/typings.d.ts | 5 +
.../mgr/dashboard/frontend/tsconfig.app.json | 14 +
src/pybind/mgr/dashboard/frontend/tsconfig.json | 32 +
.../mgr/dashboard/frontend/tsconfig.spec.json | 21 +
src/pybind/mgr/dashboard/grafana.py | 136 +
src/pybind/mgr/dashboard/model/__init__.py | 1 +
src/pybind/mgr/dashboard/module.py | 649 +
src/pybind/mgr/dashboard/openapi.yaml | 12723 ++++++++
src/pybind/mgr/dashboard/plugins/__init__.py | 71 +
src/pybind/mgr/dashboard/plugins/debug.py | 94 +
.../mgr/dashboard/plugins/feature_toggles.py | 159 +
src/pybind/mgr/dashboard/plugins/interfaces.py | 80 +
src/pybind/mgr/dashboard/plugins/lru_cache.py | 43 +
src/pybind/mgr/dashboard/plugins/motd.py | 98 +
src/pybind/mgr/dashboard/plugins/pluggy.py | 116 +
src/pybind/mgr/dashboard/plugins/plugin.py | 38 +
src/pybind/mgr/dashboard/plugins/ttl_cache.py | 119 +
src/pybind/mgr/dashboard/requirements-extra.txt | 1 +
src/pybind/mgr/dashboard/requirements-lint.txt | 11 +
src/pybind/mgr/dashboard/requirements-test.txt | 4 +
src/pybind/mgr/dashboard/requirements.txt | 14 +
src/pybind/mgr/dashboard/rest_client.py | 566 +
.../mgr/dashboard/run-backend-api-request.sh | 24 +
src/pybind/mgr/dashboard/run-backend-api-tests.sh | 182 +
.../mgr/dashboard/run-backend-rook-api-request.sh | 40 +
src/pybind/mgr/dashboard/run-frontend-e2e-tests.sh | 151 +
src/pybind/mgr/dashboard/run-frontend-unittests.sh | 50 +
src/pybind/mgr/dashboard/security.py | 60 +
src/pybind/mgr/dashboard/services/__init__.py | 1 +
src/pybind/mgr/dashboard/services/_paginate.py | 71 +
.../mgr/dashboard/services/access_control.py | 942 +
src/pybind/mgr/dashboard/services/auth.py | 224 +
src/pybind/mgr/dashboard/services/ceph_service.py | 571 +
src/pybind/mgr/dashboard/services/cephfs.py | 262 +
src/pybind/mgr/dashboard/services/cluster.py | 87 +
src/pybind/mgr/dashboard/services/exception.py | 132 +
src/pybind/mgr/dashboard/services/iscsi_cli.py | 58 +
src/pybind/mgr/dashboard/services/iscsi_client.py | 258 +
src/pybind/mgr/dashboard/services/iscsi_config.py | 111 +
src/pybind/mgr/dashboard/services/orchestrator.py | 280 +
src/pybind/mgr/dashboard/services/osd.py | 25 +
src/pybind/mgr/dashboard/services/progress.py | 91 +
src/pybind/mgr/dashboard/services/rbd.py | 775 +
src/pybind/mgr/dashboard/services/rgw_client.py | 1638 +
src/pybind/mgr/dashboard/services/settings.py | 30 +
src/pybind/mgr/dashboard/services/sso.py | 293 +
src/pybind/mgr/dashboard/services/tcmu_service.py | 113 +
src/pybind/mgr/dashboard/settings.py | 258 +
src/pybind/mgr/dashboard/tests/__init__.py | 391 +
src/pybind/mgr/dashboard/tests/helper.py | 55 +
.../mgr/dashboard/tests/test_access_control.py | 870 +
.../mgr/dashboard/tests/test_api_auditing.py | 92 +
src/pybind/mgr/dashboard/tests/test_auth.py | 66 +
src/pybind/mgr/dashboard/tests/test_cache.py | 48 +
.../mgr/dashboard/tests/test_ceph_service.py | 169 +
src/pybind/mgr/dashboard/tests/test_ceph_users.py | 52 +
src/pybind/mgr/dashboard/tests/test_cephfs.py | 42 +
.../mgr/dashboard/tests/test_cluster_upgrade.py | 61 +
src/pybind/mgr/dashboard/tests/test_controllers.py | 190 +
src/pybind/mgr/dashboard/tests/test_crud.py | 68 +
src/pybind/mgr/dashboard/tests/test_daemon.py | 46 +
src/pybind/mgr/dashboard/tests/test_docs.py | 240 +
.../dashboard/tests/test_erasure_code_profile.py | 29 +
src/pybind/mgr/dashboard/tests/test_exceptions.py | 160 +
.../mgr/dashboard/tests/test_feature_toggles.py | 64 +
src/pybind/mgr/dashboard/tests/test_grafana.py | 133 +
src/pybind/mgr/dashboard/tests/test_home.py | 73 +
src/pybind/mgr/dashboard/tests/test_host.py | 602 +
src/pybind/mgr/dashboard/tests/test_iscsi.py | 1276 +
src/pybind/mgr/dashboard/tests/test_nfs.py | 247 +
.../mgr/dashboard/tests/test_notification.py | 136 +
.../mgr/dashboard/tests/test_orchestrator.py | 40 +
src/pybind/mgr/dashboard/tests/test_osd.py | 492 +
.../mgr/dashboard/tests/test_plugin_debug.py | 37 +
src/pybind/mgr/dashboard/tests/test_pool.py | 180 +
src/pybind/mgr/dashboard/tests/test_prometheus.py | 162 +
.../mgr/dashboard/tests/test_rbd_mirroring.py | 318 +
src/pybind/mgr/dashboard/tests/test_rbd_service.py | 179 +
src/pybind/mgr/dashboard/tests/test_rest_client.py | 110 +
src/pybind/mgr/dashboard/tests/test_rest_tasks.py | 92 +
src/pybind/mgr/dashboard/tests/test_rgw.py | 241 +
src/pybind/mgr/dashboard/tests/test_rgw_client.py | 357 +
src/pybind/mgr/dashboard/tests/test_settings.py | 207 +
src/pybind/mgr/dashboard/tests/test_ssl.py | 28 +
src/pybind/mgr/dashboard/tests/test_sso.py | 190 +
src/pybind/mgr/dashboard/tests/test_task.py | 432 +
src/pybind/mgr/dashboard/tests/test_tools.py | 210 +
src/pybind/mgr/dashboard/tests/test_versioning.py | 78 +
src/pybind/mgr/dashboard/tools.py | 840 +
src/pybind/mgr/dashboard/tox.ini | 176 +
src/pybind/mgr/devicehealth/__init__.py | 2 +
src/pybind/mgr/devicehealth/module.py | 780 +
src/pybind/mgr/diskprediction_local/__init__.py | 2 +
.../models/prophetstor/config.json | 77 +
.../models/prophetstor/svm_1.pkl | Bin 0 -> 281292 bytes
.../models/prophetstor/svm_10.pkl | Bin 0 -> 217792 bytes
.../models/prophetstor/svm_104.pkl | Bin 0 -> 492492 bytes
.../models/prophetstor/svm_105.pkl | Bin 0 -> 217192 bytes
.../models/prophetstor/svm_109.pkl | Bin 0 -> 256392 bytes
.../models/prophetstor/svm_112.pkl | Bin 0 -> 499492 bytes
.../models/prophetstor/svm_114.pkl | Bin 0 -> 276492 bytes
.../models/prophetstor/svm_115.pkl | Bin 0 -> 509592 bytes
.../models/prophetstor/svm_118.pkl | Bin 0 -> 315192 bytes
.../models/prophetstor/svm_119.pkl | Bin 0 -> 485992 bytes
.../models/prophetstor/svm_12.pkl | Bin 0 -> 275692 bytes
.../models/prophetstor/svm_120.pkl | Bin 0 -> 307592 bytes
.../models/prophetstor/svm_123.pkl | Bin 0 -> 246792 bytes
.../models/prophetstor/svm_124.pkl | Bin 0 -> 310292 bytes
.../models/prophetstor/svm_125.pkl | Bin 0 -> 452492 bytes
.../models/prophetstor/svm_128.pkl | Bin 0 -> 550492 bytes
.../models/prophetstor/svm_131.pkl | Bin 0 -> 493192 bytes
.../models/prophetstor/svm_134.pkl | Bin 0 -> 266692 bytes
.../models/prophetstor/svm_138.pkl | Bin 0 -> 488292 bytes
.../models/prophetstor/svm_14.pkl | Bin 0 -> 244892 bytes
.../models/prophetstor/svm_141.pkl | Bin 0 -> 422368 bytes
.../models/prophetstor/svm_145.pkl | Bin 0 -> 359512 bytes
.../models/prophetstor/svm_151.pkl | Bin 0 -> 305944 bytes
.../models/prophetstor/svm_16.pkl | Bin 0 -> 308192 bytes
.../models/prophetstor/svm_161.pkl | Bin 0 -> 305188 bytes
.../models/prophetstor/svm_168.pkl | Bin 0 -> 301516 bytes
.../models/prophetstor/svm_169.pkl | Bin 0 -> 363400 bytes
.../models/prophetstor/svm_174.pkl | Bin 0 -> 323764 bytes
.../models/prophetstor/svm_18.pkl | Bin 0 -> 312692 bytes
.../models/prophetstor/svm_182.pkl | Bin 0 -> 354652 bytes
.../models/prophetstor/svm_185.pkl | Bin 0 -> 317176 bytes
.../models/prophetstor/svm_186.pkl | Bin 0 -> 276352 bytes
.../models/prophetstor/svm_195.pkl | Bin 0 -> 489544 bytes
.../models/prophetstor/svm_201.pkl | Bin 0 -> 307888 bytes
.../models/prophetstor/svm_204.pkl | Bin 0 -> 567088 bytes
.../models/prophetstor/svm_206.pkl | Bin 0 -> 474856 bytes
.../models/prophetstor/svm_208.pkl | Bin 0 -> 283588 bytes
.../models/prophetstor/svm_210.pkl | Bin 0 -> 617200 bytes
.../models/prophetstor/svm_212.pkl | Bin 0 -> 345148 bytes
.../models/prophetstor/svm_213.pkl | Bin 0 -> 357568 bytes
.../models/prophetstor/svm_219.pkl | Bin 0 -> 342232 bytes
.../models/prophetstor/svm_221.pkl | Bin 0 -> 365128 bytes
.../models/prophetstor/svm_222.pkl | Bin 0 -> 314800 bytes
.../models/prophetstor/svm_223.pkl | Bin 0 -> 342124 bytes
.../models/prophetstor/svm_225.pkl | Bin 0 -> 329812 bytes
.../models/prophetstor/svm_227.pkl | Bin 0 -> 296440 bytes
.../models/prophetstor/svm_229.pkl | Bin 0 -> 572380 bytes
.../models/prophetstor/svm_230.pkl | Bin 0 -> 251188 bytes
.../models/prophetstor/svm_234.pkl | Bin 0 -> 277972 bytes
.../models/prophetstor/svm_235.pkl | Bin 0 -> 243736 bytes
.../models/prophetstor/svm_236.pkl | Bin 0 -> 377872 bytes
.../models/prophetstor/svm_239.pkl | Bin 0 -> 571732 bytes
.../models/prophetstor/svm_243.pkl | Bin 0 -> 534148 bytes
.../models/prophetstor/svm_27.pkl | Bin 0 -> 504592 bytes
.../models/prophetstor/svm_3.pkl | Bin 0 -> 557192 bytes
.../models/prophetstor/svm_33.pkl | Bin 0 -> 547392 bytes
.../models/prophetstor/svm_36.pkl | Bin 0 -> 516692 bytes
.../models/prophetstor/svm_44.pkl | Bin 0 -> 546592 bytes
.../models/prophetstor/svm_50.pkl | Bin 0 -> 448292 bytes
.../models/prophetstor/svm_57.pkl | Bin 0 -> 328292 bytes
.../models/prophetstor/svm_59.pkl | Bin 0 -> 494292 bytes
.../models/prophetstor/svm_6.pkl | Bin 0 -> 314092 bytes
.../models/prophetstor/svm_61.pkl | Bin 0 -> 499492 bytes
.../models/prophetstor/svm_62.pkl | Bin 0 -> 483492 bytes
.../models/prophetstor/svm_67.pkl | Bin 0 -> 492592 bytes
.../models/prophetstor/svm_69.pkl | Bin 0 -> 288292 bytes
.../models/prophetstor/svm_71.pkl | Bin 0 -> 228792 bytes
.../models/prophetstor/svm_72.pkl | Bin 0 -> 489492 bytes
.../models/prophetstor/svm_78.pkl | Bin 0 -> 491392 bytes
.../models/prophetstor/svm_79.pkl | Bin 0 -> 284992 bytes
.../models/prophetstor/svm_82.pkl | Bin 0 -> 255292 bytes
.../models/prophetstor/svm_85.pkl | Bin 0 -> 522092 bytes
.../models/prophetstor/svm_88.pkl | Bin 0 -> 502392 bytes
.../models/prophetstor/svm_93.pkl | Bin 0 -> 302592 bytes
.../models/prophetstor/svm_97.pkl | Bin 0 -> 272392 bytes
.../diskprediction_local/models/redhat/config.json | 4 +
.../models/redhat/hgst_predictor.pkl | Bin 0 -> 2860606 bytes
.../models/redhat/hgst_scaler.pkl | Bin 0 -> 1865 bytes
.../models/redhat/seagate_predictor.pkl | Bin 0 -> 37062936 bytes
.../models/redhat/seagate_scaler.pkl | Bin 0 -> 1481 bytes
src/pybind/mgr/diskprediction_local/module.py | 305 +
src/pybind/mgr/diskprediction_local/predictor.py | 484 +
.../mgr/diskprediction_local/requirements.txt | 3 +
src/pybind/mgr/feedback/__init__.py | 2 +
src/pybind/mgr/feedback/model.py | 47 +
src/pybind/mgr/feedback/module.py | 139 +
src/pybind/mgr/feedback/service.py | 49 +
src/pybind/mgr/hello/__init__.py | 2 +
src/pybind/mgr/hello/module.py | 137 +
src/pybind/mgr/influx/__init__.py | 1 +
src/pybind/mgr/influx/module.py | 481 +
src/pybind/mgr/insights/__init__.py | 6 +
src/pybind/mgr/insights/health.py | 195 +
src/pybind/mgr/insights/module.py | 321 +
src/pybind/mgr/insights/tests/__init__.py | 0
src/pybind/mgr/insights/tests/test_health.py | 275 +
src/pybind/mgr/iostat/__init__.py | 2 +
src/pybind/mgr/iostat/module.py | 62 +
src/pybind/mgr/k8sevents/README.md | 81 +
src/pybind/mgr/k8sevents/__init__.py | 1 +
src/pybind/mgr/k8sevents/module.py | 1455 +
src/pybind/mgr/k8sevents/rbac_sample.yaml | 45 +
src/pybind/mgr/localpool/__init__.py | 2 +
src/pybind/mgr/localpool/module.py | 136 +
src/pybind/mgr/mds_autoscaler/__init__.py | 6 +
src/pybind/mgr/mds_autoscaler/module.py | 99 +
src/pybind/mgr/mds_autoscaler/tests/__init__.py | 0
.../mgr/mds_autoscaler/tests/test_autoscaler.py | 88 +
src/pybind/mgr/mgr_module.py | 2381 ++
src/pybind/mgr/mgr_util.py | 876 +
src/pybind/mgr/mirroring/__init__.py | 1 +
src/pybind/mgr/mirroring/fs/__init__.py | 0
src/pybind/mgr/mirroring/fs/blocklist.py | 10 +
src/pybind/mgr/mirroring/fs/dir_map/__init__.py | 0
src/pybind/mgr/mirroring/fs/dir_map/create.py | 23 +
src/pybind/mgr/mirroring/fs/dir_map/load.py | 74 +
src/pybind/mgr/mirroring/fs/dir_map/policy.py | 380 +
.../mgr/mirroring/fs/dir_map/state_transition.py | 94 +
src/pybind/mgr/mirroring/fs/dir_map/update.py | 151 +
src/pybind/mgr/mirroring/fs/exception.py | 3 +
src/pybind/mgr/mirroring/fs/notify.py | 121 +
src/pybind/mgr/mirroring/fs/snapshot_mirror.py | 792 +
src/pybind/mgr/mirroring/fs/utils.py | 152 +
src/pybind/mgr/mirroring/module.py | 103 +
src/pybind/mgr/nfs/__init__.py | 7 +
src/pybind/mgr/nfs/cluster.py | 309 +
src/pybind/mgr/nfs/exception.py | 32 +
src/pybind/mgr/nfs/export.py | 856 +
src/pybind/mgr/nfs/ganesha_conf.py | 548 +
src/pybind/mgr/nfs/module.py | 189 +
src/pybind/mgr/nfs/tests/__init__.py | 0
src/pybind/mgr/nfs/tests/test_nfs.py | 1156 +
src/pybind/mgr/nfs/utils.py | 104 +
src/pybind/mgr/object_format.py | 612 +
src/pybind/mgr/orchestrator/README.md | 14 +
src/pybind/mgr/orchestrator/__init__.py | 20 +
src/pybind/mgr/orchestrator/_interface.py | 1664 +
src/pybind/mgr/orchestrator/module.py | 1908 ++
src/pybind/mgr/orchestrator/tests/__init__.py | 0
.../mgr/orchestrator/tests/test_orchestrator.py | 292 +
src/pybind/mgr/osd_perf_query/__init__.py | 1 +
src/pybind/mgr/osd_perf_query/module.py | 196 +
src/pybind/mgr/osd_support/__init__.py | 1 +
src/pybind/mgr/osd_support/module.py | 19 +
src/pybind/mgr/pg_autoscaler/__init__.py | 6 +
src/pybind/mgr/pg_autoscaler/module.py | 838 +
src/pybind/mgr/pg_autoscaler/tests/__init__.py | 0
.../tests/test_cal_final_pg_target.py | 676 +
.../mgr/pg_autoscaler/tests/test_cal_ratio.py | 37 +
.../pg_autoscaler/tests/test_overlapping_roots.py | 514 +
src/pybind/mgr/progress/__init__.py | 7 +
src/pybind/mgr/progress/module.py | 882 +
src/pybind/mgr/progress/test_progress.py | 174 +
src/pybind/mgr/prometheus/__init__.py | 2 +
src/pybind/mgr/prometheus/module.py | 2038 ++
src/pybind/mgr/prometheus/test_module.py | 93 +
src/pybind/mgr/rbd_support/__init__.py | 2 +
src/pybind/mgr/rbd_support/common.py | 48 +
.../mgr/rbd_support/mirror_snapshot_schedule.py | 617 +
src/pybind/mgr/rbd_support/module.py | 321 +
src/pybind/mgr/rbd_support/perf.py | 524 +
src/pybind/mgr/rbd_support/schedule.py | 579 +
src/pybind/mgr/rbd_support/task.py | 857 +
src/pybind/mgr/rbd_support/trash_purge_schedule.py | 282 +
src/pybind/mgr/requirements-required.txt | 18 +
src/pybind/mgr/requirements.txt | 4 +
src/pybind/mgr/restful/__init__.py | 1 +
src/pybind/mgr/restful/api/__init__.py | 39 +
src/pybind/mgr/restful/api/config.py | 86 +
src/pybind/mgr/restful/api/crush.py | 25 +
src/pybind/mgr/restful/api/doc.py | 15 +
src/pybind/mgr/restful/api/mon.py | 40 +
src/pybind/mgr/restful/api/osd.py | 135 +
src/pybind/mgr/restful/api/perf.py | 27 +
src/pybind/mgr/restful/api/pool.py | 140 +
src/pybind/mgr/restful/api/request.py | 93 +
src/pybind/mgr/restful/api/server.py | 35 +
src/pybind/mgr/restful/common.py | 156 +
src/pybind/mgr/restful/context.py | 2 +
src/pybind/mgr/restful/decorators.py | 81 +
src/pybind/mgr/restful/hooks.py | 10 +
src/pybind/mgr/restful/module.py | 613 +
src/pybind/mgr/rgw/__init__.py | 2 +
src/pybind/mgr/rgw/module.py | 383 +
src/pybind/mgr/rook/.gitignore | 1 +
src/pybind/mgr/rook/CMakeLists.txt | 20 +
src/pybind/mgr/rook/__init__.py | 5 +
src/pybind/mgr/rook/ci/Dockerfile | 3 +
src/pybind/mgr/rook/ci/run-rook-e2e-tests.sh | 9 +
.../mgr/rook/ci/scripts/bootstrap-rook-cluster.sh | 135 +
src/pybind/mgr/rook/ci/tests/features/rook.feature | 12 +
.../rook/ci/tests/features/steps/implementation.py | 21 +
.../mgr/rook/ci/tests/features/steps/utils.py | 29 +
src/pybind/mgr/rook/generate_rook_ceph_client.sh | 14 +
src/pybind/mgr/rook/module.py | 727 +
src/pybind/mgr/rook/requirements.txt | 2 +
.../.github/workflows/generate.yml | 21 +
src/pybind/mgr/rook/rook-client-python/.gitignore | 12 +
src/pybind/mgr/rook/rook-client-python/LICENSE | 201 +
src/pybind/mgr/rook/rook-client-python/README.md | 81 +
src/pybind/mgr/rook/rook-client-python/conftest.py | 11 +
src/pybind/mgr/rook/rook-client-python/generate.sh | 35 +
.../rook-client-python/generate_model_classes.py | 402 +
src/pybind/mgr/rook/rook-client-python/mypy.ini | 7 +
.../mgr/rook/rook-client-python/requirements.txt | 7 +
.../rook-client-python/rook-python-client-demo.gif | Bin 0 -> 119572 bytes
.../rook-client-python/rook_client/__init__.py | 1 +
.../rook/rook-client-python/rook_client/_helper.py | 128 +
.../rook-client-python/rook_client/_helper.py.orig | 133 +
.../rook_client/cassandra/__init__.py | 0
.../rook_client/cassandra/cluster.py | 317 +
.../rook_client/ceph/__init__.py | 0
.../rook_client/ceph/cephblockpool.py | 1193 +
.../rook_client/ceph/cephclient.py | 157 +
.../rook_client/ceph/cephcluster.py | 3959 +++
.../rook_client/ceph/cephfilesystem.py | 1771 ++
.../rook_client/ceph/cephfilesystemmirror.py | 1013 +
.../rook-client-python/rook_client/ceph/cephnfs.py | 1111 +
.../rook_client/ceph/cephobjectrealm.py | 154 +
.../rook_client/ceph/cephobjectstore.py | 2631 ++
.../rook_client/ceph/cephobjectstoreuser.py | 157 +
.../rook_client/ceph/cephobjectzone.py | 797 +
.../rook_client/ceph/cephobjectzonegroup.py | 131 +
.../rook_client/ceph/cephrbdmirror.py | 1066 +
.../rook_client/ceph/objectbucket.py | 252 +
.../rook_client/ceph/objectbucketclaim.py | 147 +
.../rook-client-python/rook_client/ceph/volume.py | 177 +
.../rook_client/ceph/volumereplication.py | 363 +
.../rook_client/ceph/volumereplicationclass.py | 121 +
.../rook/rook-client-python/rook_client/py.typed | 1 +
.../rook_client/tests/__init__.py | 0
.../rook_client/tests/test_README.py | 28 +
.../rook_client/tests/test_examples.py | 52 +
.../rook_client/tests/test_properties.py | 13 +
src/pybind/mgr/rook/rook-client-python/setup.py | 20 +
src/pybind/mgr/rook/rook-client-python/tox.ini | 24 +
src/pybind/mgr/rook/rook_client/__init__.py | 1 +
src/pybind/mgr/rook/rook_client/_helper.py | 128 +
src/pybind/mgr/rook/rook_cluster.py | 1591 +
src/pybind/mgr/rook/tests/__init__.py | 0
src/pybind/mgr/rook/tests/fixtures.py | 11 +
src/pybind/mgr/rook/tests/test_placement.py | 100 +
src/pybind/mgr/rook/tests/test_rook.py | 120 +
src/pybind/mgr/selftest/__init__.py | 2 +
src/pybind/mgr/selftest/module.py | 508 +
src/pybind/mgr/snap_schedule/.gitignore | 1 +
src/pybind/mgr/snap_schedule/__init__.py | 11 +
src/pybind/mgr/snap_schedule/fs/__init__.py | 0
src/pybind/mgr/snap_schedule/fs/schedule.py | 502 +
src/pybind/mgr/snap_schedule/fs/schedule_client.py | 444 +
src/pybind/mgr/snap_schedule/module.py | 258 +
src/pybind/mgr/snap_schedule/requirements.txt | 1 +
src/pybind/mgr/snap_schedule/tests/__init__.py | 0
src/pybind/mgr/snap_schedule/tests/conftest.py | 34 +
src/pybind/mgr/snap_schedule/tests/fs/__init__.py | 0
.../mgr/snap_schedule/tests/fs/test_schedule.py | 256 +
.../snap_schedule/tests/fs/test_schedule_client.py | 37 +
src/pybind/mgr/snap_schedule/tox.ini | 19 +
src/pybind/mgr/stats/__init__.py | 1 +
src/pybind/mgr/stats/fs/__init__.py | 0
src/pybind/mgr/stats/fs/perf_stats.py | 567 +
src/pybind/mgr/stats/module.py | 41 +
src/pybind/mgr/status/__init__.py | 1 +
src/pybind/mgr/status/module.py | 374 +
src/pybind/mgr/telegraf/__init__.py | 1 +
src/pybind/mgr/telegraf/basesocket.py | 49 +
src/pybind/mgr/telegraf/module.py | 283 +
src/pybind/mgr/telegraf/protocol.py | 50 +
src/pybind/mgr/telegraf/utils.py | 26 +
src/pybind/mgr/telemetry/__init__.py | 9 +
src/pybind/mgr/telemetry/module.py | 2074 ++
src/pybind/mgr/telemetry/tests/__init__.py | 0
src/pybind/mgr/telemetry/tests/test_telemetry.py | 121 +
src/pybind/mgr/telemetry/tox.ini | 12 +
src/pybind/mgr/test_orchestrator/README.md | 16 +
src/pybind/mgr/test_orchestrator/__init__.py | 1 +
src/pybind/mgr/test_orchestrator/dummy_data.json | 463 +
src/pybind/mgr/test_orchestrator/module.py | 306 +
src/pybind/mgr/tests/__init__.py | 226 +
src/pybind/mgr/tests/test_mgr_util.py | 19 +
src/pybind/mgr/tests/test_object_format.py | 582 +
src/pybind/mgr/tests/test_tls.py | 55 +
src/pybind/mgr/tox.ini | 190 +
src/pybind/mgr/volumes/__init__.py | 2 +
src/pybind/mgr/volumes/fs/__init__.py | 0
src/pybind/mgr/volumes/fs/async_cloner.py | 413 +
src/pybind/mgr/volumes/fs/async_job.py | 303 +
src/pybind/mgr/volumes/fs/exception.py | 63 +
src/pybind/mgr/volumes/fs/fs_util.py | 216 +
src/pybind/mgr/volumes/fs/operations/__init__.py | 0
src/pybind/mgr/volumes/fs/operations/access.py | 141 +
.../mgr/volumes/fs/operations/clone_index.py | 100 +
src/pybind/mgr/volumes/fs/operations/group.py | 305 +
src/pybind/mgr/volumes/fs/operations/index.py | 23 +
src/pybind/mgr/volumes/fs/operations/lock.py | 43 +
src/pybind/mgr/volumes/fs/operations/pin_util.py | 34 +
.../mgr/volumes/fs/operations/rankevicter.py | 114 +
src/pybind/mgr/volumes/fs/operations/resolver.py | 29 +
.../mgr/volumes/fs/operations/snapshot_util.py | 32 +
src/pybind/mgr/volumes/fs/operations/subvolume.py | 74 +
src/pybind/mgr/volumes/fs/operations/template.py | 191 +
src/pybind/mgr/volumes/fs/operations/trash.py | 145 +
.../mgr/volumes/fs/operations/versions/__init__.py | 112 +
.../fs/operations/versions/auth_metadata.py | 210 +
.../fs/operations/versions/metadata_manager.py | 200 +
.../mgr/volumes/fs/operations/versions/op_sm.py | 114 +
.../fs/operations/versions/subvolume_attrs.py | 65 +
.../fs/operations/versions/subvolume_base.py | 517 +
.../volumes/fs/operations/versions/subvolume_v1.py | 904 +
.../volumes/fs/operations/versions/subvolume_v2.py | 394 +
src/pybind/mgr/volumes/fs/operations/volume.py | 296 +
src/pybind/mgr/volumes/fs/purge_queue.py | 113 +
src/pybind/mgr/volumes/fs/vol_spec.py | 45 +
src/pybind/mgr/volumes/fs/volume.py | 1002 +
src/pybind/mgr/volumes/module.py | 847 +
src/pybind/mgr/zabbix/__init__.py | 1 +
src/pybind/mgr/zabbix/module.py | 476 +
src/pybind/mgr/zabbix/zabbix_template.xml | 3249 ++
src/pybind/rados/CMakeLists.txt | 7 +
src/pybind/rados/MANIFEST.in | 2 +
src/pybind/rados/c_rados.pxd | 312 +
src/pybind/rados/ctime.pxd | 11 +
src/pybind/rados/mock_rados.pxi | 465 +
src/pybind/rados/rados.pxd | 45 +
src/pybind/rados/rados.pyx | 4363 +++
src/pybind/rados/setup.py | 206 +
src/pybind/rbd/CMakeLists.txt | 5 +
src/pybind/rbd/MANIFEST.in | 1 +
src/pybind/rbd/c_rbd.pxd | 732 +
src/pybind/rbd/ctime.pxd | 7 +
src/pybind/rbd/mock_rbd.pxi | 923 +
src/pybind/rbd/rbd.pyx | 5892 ++++
src/pybind/rbd/setup.py | 213 +
src/pybind/rgw/CMakeLists.txt | 6 +
src/pybind/rgw/MANIFEST.in | 1 +
src/pybind/rgw/c_rgw.pxd | 137 +
src/pybind/rgw/cstat.pxd | 20 +
src/pybind/rgw/mock_rgw.pxi | 156 +
src/pybind/rgw/rgw.pyx | 544 +
src/pybind/rgw/setup.py | 213 +
src/pybind/tox.ini | 19 +
2062 files changed, 391718 insertions(+)

create mode 100644 src/pybind/CMakeLists.txt
create mode 100644 src/pybind/ceph_argparse.py
create mode 100644 src/pybind/ceph_daemon.py
create mode 100755 src/pybind/ceph_mgr_repl.py
create mode 100644 src/pybind/cephfs/CMakeLists.txt
create mode 100644 src/pybind/cephfs/MANIFEST.in
create mode 100644 src/pybind/cephfs/c_cephfs.pxd
create mode 100644 src/pybind/cephfs/cephfs.pyx
create mode 100644 src/pybind/cephfs/mock_cephfs.pxi
create mode 100755 src/pybind/cephfs/setup.py
create mode 100644 src/pybind/cephfs/types.pxd
create mode 100644 src/pybind/mgr/.gitignore
create mode 100644 src/pybind/mgr/.pylintrc
create mode 100644 src/pybind/mgr/CMakeLists.txt
create mode 100644 src/pybind/mgr/alerts/__init__.py
create mode 100644 src/pybind/mgr/alerts/module.py
create mode 100644 src/pybind/mgr/balancer/__init__.py
create mode 100644 src/pybind/mgr/balancer/module.py
create mode 100644 src/pybind/mgr/ceph_module.pyi
create mode 100644 src/pybind/mgr/cephadm/.gitignore
create mode 100644 src/pybind/mgr/cephadm/HACKING.rst
create mode 100644 src/pybind/mgr/cephadm/Vagrantfile
create mode 100644 src/pybind/mgr/cephadm/__init__.py
create mode 100644 src/pybind/mgr/cephadm/agent.py
create mode 100644 src/pybind/mgr/cephadm/autotune.py
create mode 100644 src/pybind/mgr/cephadm/ceph.repo
create mode 100644 src/pybind/mgr/cephadm/configchecks.py
create mode 100644 src/pybind/mgr/cephadm/exchange.py
create mode 100644 src/pybind/mgr/cephadm/http_server.py
create mode 100644 src/pybind/mgr/cephadm/inventory.py
create mode 100644 src/pybind/mgr/cephadm/migrations.py
create mode 100644 src/pybind/mgr/cephadm/module.py
create mode 100644 src/pybind/mgr/cephadm/offline_watcher.py
create mode 100644 src/pybind/mgr/cephadm/registry.py
create mode 100644 src/pybind/mgr/cephadm/schedule.py
create mode 100644 src/pybind/mgr/cephadm/serve.py
create mode 100644 src/pybind/mgr/cephadm/service_discovery.py
create mode 100644 src/pybind/mgr/cephadm/services/__init__.py
create mode 100644 src/pybind/mgr/cephadm/services/cephadmservice.py
create mode 100644 src/pybind/mgr/cephadm/services/container.py
create mode 100644 src/pybind/mgr/cephadm/services/ingress.py
create mode 100644 src/pybind/mgr/cephadm/services/iscsi.py
create mode 100644 src/pybind/mgr/cephadm/services/jaeger.py
create mode 100644 src/pybind/mgr/cephadm/services/monitoring.py
create mode 100644 src/pybind/mgr/cephadm/services/nfs.py
create mode 100644 src/pybind/mgr/cephadm/services/nvmeof.py
create mode 100644 src/pybind/mgr/cephadm/services/osd.py
create mode 100644 src/pybind/mgr/cephadm/ssh.py
create mode 100644 src/pybind/mgr/cephadm/ssl_cert_utils.py
create mode 100644 src/pybind/mgr/cephadm/template.py
create mode 100644 src/pybind/mgr/cephadm/templates/blink_device_light_cmd.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/alertmanager/alertmanager.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/alertmanager/web.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/grafana/ceph-dashboard.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/iscsi/iscsi-gateway.cfg.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/loki.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/node-exporter/web.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/prometheus/web.yml.j2
create mode 100644 src/pybind/mgr/cephadm/templates/services/promtail.yml.j2
create mode 100644 src/pybind/mgr/cephadm/tests/__init__.py
create mode 100644 src/pybind/mgr/cephadm/tests/conftest.py
create mode 100644 src/pybind/mgr/cephadm/tests/fixtures.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_autotune.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_cephadm.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_completion.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_configchecks.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_facts.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_migration.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_osd_removal.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_scheduling.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_service_discovery.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_services.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_spec.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_ssh.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_template.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_tuned_profiles.py
create mode 100644 src/pybind/mgr/cephadm/tests/test_upgrade.py
create mode 100644 src/pybind/mgr/cephadm/tuned_profiles.py
create mode 100644 src/pybind/mgr/cephadm/upgrade.py
create mode 100644 src/pybind/mgr/cephadm/utils.py
create mode 100644 src/pybind/mgr/cephadm/vagrant.config.example.json
create mode 100644 src/pybind/mgr/cli_api/__init__.py
create mode 100755 src/pybind/mgr/cli_api/module.py
create mode 100644 src/pybind/mgr/cli_api/tests/__init__.py
create mode 100644 src/pybind/mgr/cli_api/tests/test_cli_api.py
create mode 100644 src/pybind/mgr/crash/__init__.py
create mode 100644 src/pybind/mgr/crash/module.py
create mode 100644 src/pybind/mgr/dashboard/.coveragerc
create mode 100644 src/pybind/mgr/dashboard/.editorconfig
create mode 100644 src/pybind/mgr/dashboard/.gitignore
create mode 100644 src/pybind/mgr/dashboard/.pylintrc
create mode 100644 src/pybind/mgr/dashboard/CMakeLists.txt
create mode 100644 src/pybind/mgr/dashboard/HACKING.rst
create mode 100644 src/pybind/mgr/dashboard/README.rst
create mode 100644 src/pybind/mgr/dashboard/__init__.py
create mode 100644 src/pybind/mgr/dashboard/api/__init__.py
create mode 100644 src/pybind/mgr/dashboard/api/doc.py
create mode 100644 src/pybind/mgr/dashboard/awsauth.py
create mode 100644 src/pybind/mgr/dashboard/cherrypy_backports.py
create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
create mode 100644 src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
create mode 100644 src/pybind/mgr/dashboard/constraints.txt
create mode 100755 src/pybind/mgr/dashboard/controllers/__init__.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_api_router.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_auth.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_base_controller.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_crud.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_docs.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_endpoint.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_helpers.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_paginate.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_permissions.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_rest_controller.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_router.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_task.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_ui_router.py
create mode 100644 src/pybind/mgr/dashboard/controllers/_version.py
create mode 100644 src/pybind/mgr/dashboard/controllers/auth.py
create mode 100644 src/pybind/mgr/dashboard/controllers/ceph_users.py
create mode 100644 src/pybind/mgr/dashboard/controllers/cephfs.py
create mode 100644 src/pybind/mgr/dashboard/controllers/cluster.py
create mode 100644 src/pybind/mgr/dashboard/controllers/cluster_configuration.py
create mode 100644 src/pybind/mgr/dashboard/controllers/crush_rule.py
create mode 100644 src/pybind/mgr/dashboard/controllers/daemon.py
create mode 100644 src/pybind/mgr/dashboard/controllers/docs.py
create mode 100644 src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
create mode 100644 src/pybind/mgr/dashboard/controllers/feedback.py
create mode 100644 src/pybind/mgr/dashboard/controllers/frontend_logging.py
create mode 100644 src/pybind/mgr/dashboard/controllers/grafana.py
create mode 100644 src/pybind/mgr/dashboard/controllers/health.py
create mode 100644 src/pybind/mgr/dashboard/controllers/home.py
create mode 100644 src/pybind/mgr/dashboard/controllers/host.py
create mode 100644 src/pybind/mgr/dashboard/controllers/iscsi.py
create mode 100644 src/pybind/mgr/dashboard/controllers/logs.py
create mode 100644 src/pybind/mgr/dashboard/controllers/mgr_modules.py
create mode 100644 src/pybind/mgr/dashboard/controllers/monitor.py
create mode 100644 src/pybind/mgr/dashboard/controllers/nfs.py
create mode 100644 src/pybind/mgr/dashboard/controllers/orchestrator.py
create mode 100644 src/pybind/mgr/dashboard/controllers/osd.py
create mode 100644 src/pybind/mgr/dashboard/controllers/perf_counters.py
create mode 100644 src/pybind/mgr/dashboard/controllers/pool.py
create mode 100644 src/pybind/mgr/dashboard/controllers/prometheus.py
create mode 100644 src/pybind/mgr/dashboard/controllers/rbd.py
create mode 100644 src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
create mode 100644 src/pybind/mgr/dashboard/controllers/rgw.py
create mode 100644 src/pybind/mgr/dashboard/controllers/role.py
create mode 100644 src/pybind/mgr/dashboard/controllers/saml2.py
create mode 100644 src/pybind/mgr/dashboard/controllers/service.py
create mode 100644 src/pybind/mgr/dashboard/controllers/settings.py
create mode 100644 src/pybind/mgr/dashboard/controllers/summary.py
create mode 100644 src/pybind/mgr/dashboard/controllers/task.py
create mode 100644 src/pybind/mgr/dashboard/controllers/telemetry.py
create mode 100644 src/pybind/mgr/dashboard/controllers/user.py
create mode 100644 src/pybind/mgr/dashboard/exceptions.py
create mode 100644 src/pybind/mgr/dashboard/frontend/.browserslistrc
create mode 100644 src/pybind/mgr/dashboard/frontend/.editorconfig
create mode 100644 src/pybind/mgr/dashboard/frontend/.eslintrc.json
create mode 100644 src/pybind/mgr/dashboard/frontend/.gherkin-lintrc
create mode 100644 src/pybind/mgr/dashboard/frontend/.gitignore
create mode 100644 src/pybind/mgr/dashboard/frontend/.htmllintrc
create mode 100644 src/pybind/mgr/dashboard/frontend/.npmrc
create mode 100644 src/pybind/mgr/dashboard/frontend/.prettierignore
create mode 100644 src/pybind/mgr/dashboard/frontend/.prettierrc
create mode 100644 src/pybind/mgr/dashboard/frontend/.stylelintrc
create mode 100644 src/pybind/mgr/dashboard/frontend/CMakeLists.txt
create mode 100644 src/pybind/mgr/dashboard/frontend/angular.json
create mode 100644 src/pybind/mgr/dashboard/frontend/applitools.config.js
create mode 100644 src/pybind/mgr/dashboard/frontend/babel.config.js
create mode 100755 src/pybind/mgr/dashboard/frontend/cd.js
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress.config.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/a11y/dashboard.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/a11y/navigation.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/images.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/images.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/iscsi.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/iscsi.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/mirroring.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/block/mirroring.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/create-cluster.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/crush-map.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/crush-map.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/inventory.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/mgr-modules.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/mgr-modules.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/monitors.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/monitors.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/osds.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/osds.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/services.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/users.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/users.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/create-cluster/create-cluster.feature.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/forms-helper.feature.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/global.feature.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/grafana.feature.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/table-helper.feature.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolume-groups.e2e-spec.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolumes.e2e-spec.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/03-inventory.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/04-osds.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/05-services.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/grafana/grafana.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/01-create-cluster-welcome.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/02-create-cluster-add-host.feature
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/06-cluster-check.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/07-osds.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/08-hosts.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/09-services.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/10-nfs-exports.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/nfs/nfs-export.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/page-helper.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/daemons.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/daemons.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/roles.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/roles.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/users.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/users.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/api-docs.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/api-docs.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/language.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/language.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/login.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/login.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/navigation.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/navigation.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/role-mgmt.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/role-mgmt.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/user-mgmt.e2e-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/user-mgmt.po.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/visualTests/dashboard.vrt-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/e2e/visualTests/login.vrt-spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/fixtures/block-rbd-status.json
create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/fixtures/nfs-ganesha-status.json
create mode 100644 
100644 src/pybind/mgr/dashboard/frontend/cypress/fixtures/orchestrator/inventory.json create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/fixtures/orchestrator/services.json create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/fixtures/rgw-status.json create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/plugins/index.js create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/support/e2e.ts create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/support/eyes-index.d.ts create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/tsconfig.json create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/119.066087561586659c.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/25.9d84971ea743706b.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/803.08339784f3bb5d16.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/Ceph_Logo.beb815b55d2e7363.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/Ceph_Ceph_Logo_with_text_red_white.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/Ceph_Ceph_Logo_with_text_white.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/Ceph_Logo.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/ceph_background.gif create mode 100755 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/loading.gif create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/logo-mini.png create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/assets/prometheus_logo.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/ceph_background.3fbdf95cd52530d7.gif create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/favicon.ico create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/forkawesome-webfont.23671bdbd055fa7b.woff create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/forkawesome-webfont.3217b1b06e001045.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/forkawesome-webfont.3b3951dce6cf5d60.ttf create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/forkawesome-webfont.c0fee260bb6fd5fd.eot create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/forkawesome-webfont.d0a4ad9e6369d510.woff2 create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/index.html create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/main.a87f559bb03ca0fb.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.374f1f989f34e1be.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/prometheus_logo.8057911d27be9bb1.svg create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.a53144ca583f6e2c.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/scripts.177a7ad3f45b4499.js create mode 100644 src/pybind/mgr/dashboard/frontend/dist/en-US/styles.5f6140b407c420b8.css create mode 100644 src/pybind/mgr/dashboard/frontend/html-linter.config.json create mode 100644 src/pybind/mgr/dashboard/frontend/i18n.config.json create mode 100644 src/pybind/mgr/dashboard/frontend/jest.config.cjs create mode 100644 src/pybind/mgr/dashboard/frontend/ngcc.config.js create mode 100644 src/pybind/mgr/dashboard/frontend/package-lock.json create mode 100644 src/pybind/mgr/dashboard/frontend/package.json create mode 100644 
src/pybind/mgr/dashboard/frontend/proxy.conf.json.sample create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app-routing.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/app.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/block.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-setting/iscsi-setting.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-setting/iscsi-setting.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-setting/iscsi-setting.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-setting/iscsi-setting.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-tabs/iscsi-tabs.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-tabs/iscsi-tabs.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-tabs/iscsi-tabs.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-tabs/iscsi-tabs.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-details/iscsi-target-details.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-details/iscsi-target-details.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-details/iscsi-target-details.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-details/iscsi-target-details.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-discovery-modal/iscsi-target-discovery-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-discovery-modal/iscsi-target-discovery-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-discovery-modal/iscsi-target-discovery-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-discovery-modal/iscsi-target-discovery-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-form/iscsi-target-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-form/iscsi-target-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-form/iscsi-target-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-form/iscsi-target-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-image-settings-modal/iscsi-target-image-settings-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-image-settings-modal/iscsi-target-image-settings-modal.component.scss create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-image-settings-modal/iscsi-target-image-settings-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-image-settings-modal/iscsi-target-image-settings-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-iqn-settings-modal/iscsi-target-iqn-settings-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-iqn-settings-modal/iscsi-target-iqn-settings-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-iqn-settings-modal/iscsi-target-iqn-settings-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-iqn-settings-modal/iscsi-target-iqn-settings-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-list/iscsi-target-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-list/iscsi-target-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-list/iscsi-target-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-list/iscsi-target-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi/iscsi.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi/iscsi.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi/iscsi.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi/iscsi.component.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-create-modal/bootstrap-create-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-create-modal/bootstrap-create-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-create-modal/bootstrap-create-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-create-modal/bootstrap-create-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-import-modal/bootstrap-import-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-import-modal/bootstrap-import-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-import-modal/bootstrap-import-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-import-modal/bootstrap-import-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/daemon-list/daemon-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/daemon-list/daemon-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/daemon-list/daemon-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/daemon-list/daemon-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/image-list/image-list.component.html create mode 
100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/image-list/image-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/image-list/image-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/image-list/image-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/mirror-health-color.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/mirror-health-color.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/mirroring.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/overview/overview.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/overview/overview.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/overview/overview.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/overview/overview.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-response.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-response.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-list/pool-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-list/pool-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-list/pool-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-list/pool-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-list/rbd-configuration-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-list/rbd-configuration-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-list/rbd-configuration-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-list/rbd-configuration-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-details/rbd-details.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-details/rbd-details.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-details/rbd-details.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-details/rbd-details.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-feature.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-clone-request.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-copy-request.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-create-request.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-edit-request.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-mode.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form-response.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-parent.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-list/rbd-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-list/rbd-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-list/rbd-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-list/rbd-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-list/rbd-model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-form/rbd-namespace-form-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-form/rbd-namespace-form-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-form/rbd-namespace-form-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-form/rbd-namespace-form-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-list/rbd-namespace-list.component.html create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-list/rbd-namespace-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-list/rbd-namespace-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-namespace-list/rbd-namespace-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-performance/rbd-performance.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-performance/rbd-performance.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-performance/rbd-performance.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-performance/rbd-performance.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-form/rbd-snapshot-form-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-form/rbd-snapshot-form-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-form/rbd-snapshot-form-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-form/rbd-snapshot-form-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot-actions.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-snapshot-list/rbd-snapshot.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-tabs/rbd-tabs.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-tabs/rbd-tabs.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-tabs/rbd-tabs.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-tabs/rbd-tabs.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-list/rbd-trash-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-list/rbd-trash-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-list/rbd-trash-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-list/rbd-trash-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-move-modal/rbd-trash-move-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-move-modal/rbd-trash-move-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-move-modal/rbd-trash-move-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-move-modal/rbd-trash-move-modal.component.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-purge-modal/rbd-trash-purge-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-purge-modal/rbd-trash-purge-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-purge-modal/rbd-trash-purge-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-purge-modal/rbd-trash-purge-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-restore-modal/rbd-trash-restore-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-restore-modal/rbd-trash-restore-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-restore-modal/rbd-trash-restore-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-trash-restore-modal/rbd-trash-restore-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/ceph.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-chart/cephfs-chart.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-chart/cephfs-chart.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-chart/cephfs-chart.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-chart/cephfs-chart.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-clients/cephfs-clients.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-clients/cephfs-clients.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-clients/cephfs-clients.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-clients/cephfs-clients.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-detail/cephfs-detail.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-detail/cephfs-detail.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-detail/cephfs-detail.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-detail/cephfs-detail.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-directories/cephfs-directories.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-directories/cephfs-directories.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-directories/cephfs-directories.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-directories/cephfs-directories.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-tabs/cephfs-tabs.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-tabs/cephfs-tabs.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-tabs/cephfs-tabs.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-tabs/cephfs-tabs.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/cluster.module.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-details/configuration-details.component.html create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-details/configuration-details.component.scss create mode 100755 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-details/configuration-details.component.spec.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-details/configuration-details.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/crushmap/crushmap.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/crushmap/crushmap.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/crushmap/crushmap.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/crushmap/crushmap.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/fixtures/host_list_response.json create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-details/host-details.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-details/host-details.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-details/host-details.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-details/host-details.component.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-form/host-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-form/host-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-form/host-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/host-form/host-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/hosts.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/hosts.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/hosts.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/hosts/hosts.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/fixtures/inventory_list_response.json create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/inventory-device.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/inventory-devices.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/inventory-devices.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/inventory-devices.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-devices/inventory-devices.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-host.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/logs/logs.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/logs/logs.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/logs/logs.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/logs/logs.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-details/mgr-module-details.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-details/mgr-module-details.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-details/mgr-module-details.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-details/mgr-module-details.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-form/mgr-module-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-form/mgr-module-form.component.scss create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-form/mgr-module-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-form/mgr-module-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-list/mgr-module-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-list/mgr-module-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-list/mgr-module-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-module-list/mgr-module-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/mgr-modules/mgr-modules.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/monitor/monitor.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/monitor/monitor.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/monitor/monitor.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/monitor/monitor.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-creation-preview-modal/osd-creation-preview-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-creation-preview-modal/osd-creation-preview-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-creation-preview-modal/osd-creation-preview-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-creation-preview-modal/osd-creation-preview-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-details/osd-details.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-details/osd-details.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-details/osd-details.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-details/osd-details.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/devices-selection-change-event.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/devices-selection-clear-event.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/osd-devices-selection-groups.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/osd-devices-selection-groups.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/osd-devices-selection-groups.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-groups/osd-devices-selection-groups.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-modal/osd-devices-selection-modal.component.html create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-modal/osd-devices-selection-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-modal/osd-devices-selection-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-devices-selection-modal/osd-devices-selection-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-indiv-modal/osd-flags-indiv-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-indiv-modal/osd-flags-indiv-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-indiv-modal/osd-flags-indiv-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-indiv-modal/osd-flags-indiv-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-modal/osd-flags-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-modal/osd-flags-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-modal/osd-flags-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-flags-modal/osd-flags-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/drive-group.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/osd-feature.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/osd-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/osd-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/osd-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-form/osd-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-list/fixtures/osd_list_response.json create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-list/osd-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-list/osd-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-list/osd-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-list/osd-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-pg-scrub-modal/osd-pg-scrub-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-pg-scrub-modal/osd-pg-scrub-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-pg-scrub-modal/osd-pg-scrub-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-pg-scrub-modal/osd-pg-scrub-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-pg-scrub-modal/osd-pg-scrub-modal.options.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-recv-speed-modal/osd-recv-speed-modal.component.html create mode 100755 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-recv-speed-modal/osd-recv-speed-modal.component.scss create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-recv-speed-modal/osd-recv-speed-modal.component.spec.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-recv-speed-modal/osd-recv-speed-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-reweight-modal/osd-reweight-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-reweight-modal/osd-reweight-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-reweight-modal/osd-reweight-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-reweight-modal/osd-reweight-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-scrub-modal/osd-scrub-modal.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-scrub-modal/osd-scrub-modal.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-scrub-modal/osd-scrub-modal.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/osd/osd-scrub-modal/osd-scrub-modal.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/active-alert-list/active-alert-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/active-alert-list/active-alert-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/active-alert-list/active-alert-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/active-alert-list/active-alert-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/prometheus-tabs/prometheus-tabs.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/prometheus-tabs/prometheus-tabs.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/prometheus-tabs/prometheus-tabs.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/prometheus-tabs/prometheus-tabs.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/rules-list/rules-list.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/rules-list/rules-list.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/rules-list/rules-list.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/rules-list/rules-list.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-form/silence-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-form/silence-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-form/silence-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-form/silence-form.component.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-list/silence-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-list/silence-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-list/silence-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-list/silence-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-matcher-modal/silence-matcher-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-matcher-modal/silence-matcher-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-matcher-modal/silence-matcher-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/prometheus/silence-matcher-modal/silence-matcher-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/placement.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/placement.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-daemon-list/service-daemon-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-daemon-list/service-daemon-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-daemon-list/service-daemon-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-daemon-list/service-daemon-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-details/service-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-details/service-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-details/service-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-details/service-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-form/service-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-form/service-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-form/service-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/service-form/service-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/telemetry/telemetry.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/telemetry/telemetry.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/telemetry/telemetry.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/telemetry/telemetry.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-area-chart/dashboard-area-chart.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-area-chart/dashboard-area-chart.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-area-chart/dashboard-area-chart.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-area-chart/dashboard-area-chart.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-pie/dashboard-pie.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-pie/dashboard-pie.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-pie/dashboard-pie.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-pie/dashboard-pie.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-time-selector/dashboard-time-selector.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-time-selector/dashboard-time-selector.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-time-selector/dashboard-time-selector.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-time-selector/dashboard-time-selector.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard-v3.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard/dashboard-v3.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard/dashboard-v3.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard/dashboard-v3.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/dashboard/dashboard-v3.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/pg-summary.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/pg-summary.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/dashboard.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/dashboard/dashboard.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/dashboard/dashboard.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/dashboard/dashboard.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/dashboard/dashboard.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health-pie/health-pie.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health-pie/health-pie.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health-pie/health-pie.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health-pie/health-pie.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-card/info-card-popover.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-card/info-card.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-card/info-card.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-card/info-card.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-card/info-card.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-group/info-group.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-group/info-group.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-group/info-group.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/info-group/info-group.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mds-summary.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mds-summary.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mgr-summary.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mgr-summary.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mon-summary.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/mon-summary.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/osd-summary.pipe.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/osd-summary.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/models/nfs.fsal.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-details/nfs-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-details/nfs-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-details/nfs-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-details/nfs-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form-client/nfs-form-client.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form-client/nfs-form-client.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form-client/nfs-form-client.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form-client/nfs-form-client.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-list/nfs-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-list/nfs-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-list/nfs-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-list/nfs-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/performance-counter.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/performance-counter/performance-counter.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/performance-counter/performance-counter.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/performance-counter/performance-counter.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/performance-counter/performance-counter.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/table-performance-counter/table-performance-counter.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/table-performance-counter/table-performance-counter.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/table-performance-counter/table-performance-counter.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/performance-counter/table-performance-counter/table-performance-counter.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/crush-rule-form-modal/crush-rule-form-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/crush-rule-form-modal/crush-rule-form-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/crush-rule-form-modal/crush-rule-form-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/crush-rule-form-modal/crush-rule-form-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/erasure-code-profile-form/erasure-code-profile-form-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/erasure-code-profile-form/erasure-code-profile-form-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/erasure-code-profile-form/erasure-code-profile-form-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/erasure-code-profile-form/erasure-code-profile-form-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-details/pool-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-details/pool-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-details/pool-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-details/pool-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-form/pool-form-data.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-form/pool-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-form/pool-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-form/pool-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-form/pool-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-list/pool-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-list/pool-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-list/pool-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-list/pool-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool-stat.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/pool/pool.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-bucket-encryption.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-bucket-mfa-delete.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-bucket-versioning.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-daemon.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-user-capabilities.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-user-capability.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-user-s3-key.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-user-subuser.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-user-swift-key.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-details/rgw-bucket-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-details/rgw-bucket-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-details/rgw-bucket-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-details/rgw-bucket-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-form/rgw-bucket-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-form/rgw-bucket-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-form/rgw-bucket-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-form/rgw-bucket-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-list/rgw-bucket-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-list/rgw-bucket-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-list/rgw-bucket-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-bucket-list/rgw-bucket-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-config-modal/rgw-config-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-config-modal/rgw-config-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-config-modal/rgw-config-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-config-modal/rgw-config-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-details/rgw-daemon-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-details/rgw-daemon-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-details/rgw-daemon-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-details/rgw-daemon-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-list/rgw-daemon-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-list/rgw-daemon-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-list/rgw-daemon-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-daemon-list/rgw-daemon-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-card-popover.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-capability-modal/rgw-user-capability-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-capability-modal/rgw-user-capability-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-capability-modal/rgw-user-capability-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-capability-modal/rgw-user-capability-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-details/rgw-user-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-details/rgw-user-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-details/rgw-user-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-details/rgw-user-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-form/rgw-user-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-form/rgw-user-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-form/rgw-user-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-form/rgw-user-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-list/rgw-user-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-list/rgw-user-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-list/rgw-user-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-list/rgw-user-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-s3-key-modal/rgw-user-s3-key-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-s3-key-modal/rgw-user-s3-key-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-s3-key-modal/rgw-user-s3-key-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-s3-key-modal/rgw-user-s3-key-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-subuser-modal/rgw-user-subuser-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-subuser-modal/rgw-user-subuser-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-subuser-modal/rgw-user-subuser-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-subuser-modal/rgw-user-subuser-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-swift-key-modal/rgw-user-swift-key-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-swift-key-modal/rgw-user-swift-key-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-swift-key-modal/rgw-user-swift-key-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-swift-key-modal/rgw-user-swift-key-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-tabs/rgw-user-tabs.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-tabs/rgw-user-tabs.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-tabs/rgw-user-tabs.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-user-tabs/rgw-user-tabs.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/ceph-shared.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/device-list/device-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/device-list/device-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/device-list/device-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/device-list/device-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/feedback/feedback.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/feedback/feedback.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/feedback/feedback.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/feedback/feedback.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/pg-category.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/pg-category.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/pg-category.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/fixtures/smart_data_version_1_0_ata_response.json
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/fixtures/smart_data_version_1_0_nvme_response.json
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/fixtures/smart_data_version_1_0_scsi_response.json
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/smart-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/smart-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/smart-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/ceph/shared/smart-list/smart-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login-password-form/login-password-form.component.html
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login-password-form/login-password-form.component.scss
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login-password-form/login-password-form.component.spec.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login-password-form/login-password-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login/login.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login/login.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login/login.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/login/login.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-details/role-details.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-details/role-details.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-details/role-details.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-details/role-details.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form-mode.enum.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-form/role-form.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-list/role-list.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-list/role-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-list/role-list.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/role-list/role-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-mode.enum.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.model.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-list/user-list.component.html
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-list/user-list.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-list/user-list.component.spec.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-list/user-list.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-password-form/user-password-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-password-form/user-password-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-password-form/user-password-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-password-form/user-password-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-tabs/user-tabs.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-tabs/user-tabs.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-tabs/user-tabs.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-tabs/user-tabs.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/context/context.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/context/context.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/context/context.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/context/context.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/core.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/error/error.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/error/error.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/error/error.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/error/error.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/error/error.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/blank-layout/blank-layout.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/blank-layout/blank-layout.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/blank-layout/blank-layout.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/blank-layout/blank-layout.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/login-layout/login-layout.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/login-layout/login-layout.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/login-layout/login-layout.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/login-layout/login-layout.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/workbench-layout/workbench-layout.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/workbench-layout/workbench-layout.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/workbench-layout/workbench-layout.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/layouts/workbench-layout/workbench-layout.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/about/about.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/about/about.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/about/about.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/about/about.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/administration/administration.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/administration/administration.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/administration/administration.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/administration/administration.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/api-docs/api-docs.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/api-docs/api-docs.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/api-docs/api-docs.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/breadcrumbs/breadcrumbs.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/breadcrumbs/breadcrumbs.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/breadcrumbs/breadcrumbs.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/breadcrumbs/breadcrumbs.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/dashboard-help/dashboard-help.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/dashboard-help/dashboard-help.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/dashboard-help/dashboard-help.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/dashboard-help/dashboard-help.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/identity/identity.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/identity/identity.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/identity/identity.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/identity/identity.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/navigation.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/navigation/navigation.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/navigation/navigation.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/navigation/navigation.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/navigation/navigation.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/notifications/notifications.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/notifications/notifications.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/notifications/notifications.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/core/navigation/notifications/notifications.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/api-client.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/api-client.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/auth.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/auth.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/ceph-service.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/ceph-user.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume-group.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume-group.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cluster.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/cluster.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/configuration.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/configuration.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/crush-rule.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/crush-rule.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/custom-login-banner.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/custom-login-banner.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/daemon.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/daemon.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/erasure-code-profile.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/erasure-code-profile.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/feedback.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/feedback.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/health.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/health.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/host.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/host.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/iscsi.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/iscsi.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/logging.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/logging.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/logs.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/logs.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/mgr-module.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/mgr-module.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/monitor.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/monitor.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/motd.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/motd.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/orchestrator.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/orchestrator.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/osd.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/osd.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/paginate.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/performance-counter.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/performance-counter.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/pool.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/pool.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rbd-mirroring.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rbd-mirroring.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rbd.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rbd.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rbd.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-bucket.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-bucket.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-daemon.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-daemon.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-multisite.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-realm.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-realm.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-site.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-site.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-user.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-user.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zone.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zone.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zonegroup.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zonegroup.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/role.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/role.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/scope.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/scope.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/settings.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/settings.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/telemetry.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/telemetry.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/upgrade.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/upgrade.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/user.service.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/api/user.service.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/cd-helper.class.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/cd-helper.class.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/crush.node.selection.class.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/crush.node.selection.class.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/css-helper.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/list-with-details.class.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/table-status-view-cache.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/table-status-view-cache.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/table-status.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/classes/table-status.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/alert-panel/alert-panel.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/alert-panel/alert-panel.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/alert-panel/alert-panel.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/alert-panel/alert-panel.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/back-button/back-button.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/back-button/back-button.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/back-button/back-button.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/back-button/back-button.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card-row/card-row.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card-row/card-row.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card-row/card-row.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card-row/card-row.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/cd-label/cd-label.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/cd-label/cd-label.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/cd-label/cd-label.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/cd-label/cd-label.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/cd-label/color-class-from-text.pipe.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/components.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.types.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.types.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/confirmation-modal/confirmation-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/confirmation-modal/confirmation-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/confirmation-modal/confirmation-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/confirmation-modal/confirmation-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/copy2clipboard-button/copy2clipboard-button.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/copy2clipboard-button/copy2clipboard-button.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/copy2clipboard-button/copy2clipboard-button.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/copy2clipboard-button/copy2clipboard-button.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/custom-login-banner/custom-login-banner.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/custom-login-banner/custom-login-banner.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/custom-login-banner/custom-login-banner.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/custom-login-banner/custom-login-banner.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/doc/doc.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/doc/doc.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/doc/doc.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/doc/doc.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/download-button/download-button.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/download-button/download-button.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/download-button/download-button.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/download-button/download-button.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-button-panel/form-button-panel.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-button-panel/form-button-panel.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-button-panel/form-button-panel.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-button-panel/form-button-panel.component.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-modal/form-modal.component.html
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-modal/form-modal.component.scss
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-modal/form-modal.component.spec.ts
create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/components/form-modal/form-modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/language-selector/language-selector.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/language-selector/language-selector.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/language-selector/language-selector.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/language-selector/language-selector.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/language-selector/supported-languages.enum.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/loading-panel/loading-panel.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/loading-panel/loading-panel.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/loading-panel/loading-panel.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/loading-panel/loading-panel.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/modal/modal.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/modal/modal.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/modal/modal.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/modal/modal.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/notifications-sidebar/notifications-sidebar.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/notifications-sidebar/notifications-sidebar.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/notifications-sidebar/notifications-sidebar.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/notifications-sidebar/notifications-sidebar.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/orchestrator-doc-panel/orchestrator-doc-panel.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/orchestrator-doc-panel/orchestrator-doc-panel.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/orchestrator-doc-panel/orchestrator-doc-panel.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/orchestrator-doc-panel/orchestrator-doc-panel.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/pwd-expiration-notification/pwd-expiration-notification.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/pwd-expiration-notification/pwd-expiration-notification.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/pwd-expiration-notification/pwd-expiration-notification.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/pwd-expiration-notification/pwd-expiration-notification.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/refresh-selector/refresh-selector.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/refresh-selector/refresh-selector.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/refresh-selector/refresh-selector.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/refresh-selector/refresh-selector.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select-badges/select-badges.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select-badges/select-badges.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select-badges/select-badges.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select-badges/select-badges.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select-messages.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select-option.model.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/select/select.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/sparkline/sparkline.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/sparkline/sparkline.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/sparkline/sparkline.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/sparkline/sparkline.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/submit-button/submit-button.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/submit-button/submit-button.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/submit-button/submit-button.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/submit-button/submit-button.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/telemetry-notification/telemetry-notification.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/telemetry-notification/telemetry-notification.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/telemetry-notification/telemetry-notification.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/telemetry-notification/telemetry-notification.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/usage-bar/usage-bar.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/usage-bar/usage-bar.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/usage-bar/usage-bar.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/usage-bar/usage-bar.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/constants/app.constants.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/crud-table/crud-table.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/crud-table/crud-table.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/crud-table/crud-table.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/crud-table/crud-table.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/datatable.module.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-actions/table-actions.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-actions/table-actions.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-actions/table-actions.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-actions/table-actions.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-key-value/table-key-value.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-key-value/table-key-value.component.scss
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-key-value/table-key-value.component.spec.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-key-value/table-key-value.component.ts
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-pagination/table-pagination.component.html
create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-pagination/table-pagination.component.scss
create mode 100644
src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-pagination/table-pagination.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table-pagination/table-pagination.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table/table.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table/table.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table/table.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/table/table.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/decorators/cd-encode.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/decorators/cd-encode.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/auth-storage.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/auth-storage.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/autofocus.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/autofocus.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/dimless-binary-per-second.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/dimless-binary-per-second.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/dimless-binary.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/dimless-binary.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/directives.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-input-disable.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-input-disable.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-loading.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-loading.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-scope.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-scope.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/iops.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/iops.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/milliseconds.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/milliseconds.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-control.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-control.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-group.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-group.directive.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-validation.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/ng-bootstrap-form-validation/cd-form-validation.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/password-button.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/password-button.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/stateful-tab.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/stateful-tab.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/trim.directive.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/directives/trim.directive.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/cell-template.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/components.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/health-color.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/health-icon.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/health-label.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/icons.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/notification-type.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/unix_errno.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/enum/view-cache-status.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form-builder.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form-builder.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form-group.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form-group.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-form.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-validators.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/cd-validators.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/crud-form.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/crud-form.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/crud-form.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/crud-form.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/crud-form.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-array-type/formly-array-type.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-array-type/formly-array-type.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-array-type/formly-array-type.component.spec.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-array-type/formly-array-type.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-file-type/formly-file-type-accessor.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-file-type/formly-file-type.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-file-type/formly-file-type.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-file-type/formly-file-type.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-file-type/formly-file-type.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-type/formly-input-type.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-type/formly-input-type.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-type/formly-input-type.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-type/formly-input-type.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-wrapper/formly-input-wrapper.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-wrapper/formly-input-wrapper.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-wrapper/formly-input-wrapper.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-input-wrapper/formly-input-wrapper.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-object-type/formly-object-type.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-object-type/formly-object-type.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-object-type/formly-object-type.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-object-type/formly-object-type.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-textarea-type/formly-textarea-type.component.html create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-textarea-type/formly-textarea-type.component.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-textarea-type/formly-textarea-type.component.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/formly-textarea-type/formly-textarea-type.component.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/helpers.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/validators/file-validator.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/validators/json-validator.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/forms/crud-form/validators/rgw-role-validator.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/helpers/helpers.module.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/helpers/prometheus-list-helper.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/alertmanager-silence.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/breadcrumbs.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-details.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-form-modal-field-config.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-notification.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-notification.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-pwd-expiration-settings.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-pwd-policy-settings.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-action.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-column-filter.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-column-filters-change.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-column.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-fetch-data-context.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-paging.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-selection.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-user-config.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-directory-models.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolume-group.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolume.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolumegroup.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/chart-tooltip.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/configuration.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/credentials.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/crud-table-metadata.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/crush-node.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/crush-rule.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/crush-step.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/daemon.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/devices.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/erasure-code-profile.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/executing-task.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/finished-task.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/flag.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/image-spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/inventory-device-type.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/login-response.ts create 
mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/mirroring-summary.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/orchestrator.enum.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/orchestrator.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/osd-deployment-options.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/osd-settings.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/permission.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/permissions.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/pool-form-info.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/prometheus-alerts.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/service.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/smart.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/summary.model.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/task-exception.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/task.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/upgrade.interface.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/models/wizard-steps.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/array.pipe.spec.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/array.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/boolean-text.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/boolean-text.pipe.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/boolean.pipe.spec.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/boolean.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/cd-date.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/cd-date.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ceph-release-name.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ceph-release-name.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ceph-short-version.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ceph-short-version.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/dimless-binary-per-second.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/dimless-binary.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/dimless-binary.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/dimless.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/dimless.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/duration.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/duration.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/empty.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/empty.pipe.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/encode-uri.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/encode-uri.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/filter.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/filter.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-color.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-color.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-icon.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-icon.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-label.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/health-label.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/iops.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/iops.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/iscsi-backstore.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/iscsi-backstore.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/join.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/join.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/log-priority.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/log-priority.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/map.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/map.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/mds-summary.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/mds-summary.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/mgr-summary.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/mgr-summary.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/milliseconds.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/milliseconds.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/not-available.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/not-available.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/octal-to-human-readable.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/octal-to-human-readable.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ordinal.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/ordinal.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/osd-summary.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/osd-summary.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/path.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/path.pipe.ts create mode 100755 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/pipes.module.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/rbd-configuration-source.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/rbd-configuration-source.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/relative-date.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/relative-date.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/round.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/round.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/sanitize-html.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/sanitize-html.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/search-highlight.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/search-highlight.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/truncate.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/truncate.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/upper-first.pipe.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/upper-first.pipe.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/rxjs/operators/page-visibilty.operator.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/api-interceptor.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/api-interceptor.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/auth-guard.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/auth-guard.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/auth-storage.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/auth-storage.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/cd-table-server-side.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/cd-table-server-side.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/change-password-guard.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/change-password-guard.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/crud-form-adapter.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/crud-form-adapter.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/data-gateway.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/data-gateway.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/device.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/device.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/doc.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/doc.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/favicon.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/favicon.service.ts create 
mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/feature-toggles-guard.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/feature-toggles-guard.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/feature-toggles.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/feature-toggles.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/formatter.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/formatter.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/js-error-handler.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/language.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/language.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/modal.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/modal.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/module-status-guard.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/module-status-guard.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/ngzone-scheduler.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/no-sso-guard.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/no-sso-guard.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/notification.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/notification.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/number-formatter.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/number-formatter.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/password-policy.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/password-policy.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert-formatter.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert-formatter.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-notification.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-notification.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-silence-matcher.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-silence-matcher.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/rbd-configuration.service.spec.ts create mode 100644 
src/pybind/mgr/dashboard/frontend/src/app/shared/services/rbd-configuration.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/refresh-interval.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/refresh-interval.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/summary.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/summary.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-list.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-list.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-manager.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-manager.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-message.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-message.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-wrapper.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-wrapper.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/telemetry-notification.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/telemetry-notification.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/text-to-download.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/text-to-download.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/time-diff.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/time-diff.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/timer.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/timer.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/url-builder.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/url-builder.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.spec.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/app/shared/shared.module.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/.gitkeep create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/Ceph_Ceph_Logo_with_text_red_white.svg create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/Ceph_Ceph_Logo_with_text_white.svg create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/Ceph_Logo.svg create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/ceph_background.gif create mode 100755 src/pybind/mgr/dashboard/frontend/src/assets/loading.gif create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/logo-mini.png create mode 100644 src/pybind/mgr/dashboard/frontend/src/assets/prometheus_logo.svg create mode 100644 src/pybind/mgr/dashboard/frontend/src/environments/environment.tpl.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/favicon.ico create mode 100644 src/pybind/mgr/dashboard/frontend/src/index.html 
create mode 100644 src/pybind/mgr/dashboard/frontend/src/jestGlobalMocks.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.cs.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.de-DE.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.es-ES.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.fr-FR.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.id-ID.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.it-IT.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.ja-JP.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.ko-KR.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.pl-PL.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.pt-BR.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.zh-CN.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/locale/messages.zh-TW.xlf create mode 100644 src/pybind/mgr/dashboard/frontend/src/main.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/polyfills.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/setupJest.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/_chart-tooltip.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/bootstrap-extends.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_basics.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_buttons.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_dropdown.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_forms.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_grid.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_icons.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_index.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_navs.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/ceph-custom/_toast.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/defaults/_bootstrap-defaults.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/defaults/_functions.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/defaults/_index.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/defaults/_mixins.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/vendor/_index.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/vendor/_style-overrides.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/styles/vendor/_variables.scss create mode 100644 src/pybind/mgr/dashboard/frontend/src/testing/activated-route-stub.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/testing/unit-test-helper.ts create mode 100644 src/pybind/mgr/dashboard/frontend/src/typings.d.ts create mode 100644 src/pybind/mgr/dashboard/frontend/tsconfig.app.json create mode 100644 src/pybind/mgr/dashboard/frontend/tsconfig.json create mode 100644 src/pybind/mgr/dashboard/frontend/tsconfig.spec.json create mode 100644 src/pybind/mgr/dashboard/grafana.py create mode 100644 src/pybind/mgr/dashboard/model/__init__.py create mode 100644 
src/pybind/mgr/dashboard/module.py create mode 100644 src/pybind/mgr/dashboard/openapi.yaml create mode 100644 src/pybind/mgr/dashboard/plugins/__init__.py create mode 100644 src/pybind/mgr/dashboard/plugins/debug.py create mode 100644 src/pybind/mgr/dashboard/plugins/feature_toggles.py create mode 100644 src/pybind/mgr/dashboard/plugins/interfaces.py create mode 100644 src/pybind/mgr/dashboard/plugins/lru_cache.py create mode 100644 src/pybind/mgr/dashboard/plugins/motd.py create mode 100644 src/pybind/mgr/dashboard/plugins/pluggy.py create mode 100644 src/pybind/mgr/dashboard/plugins/plugin.py create mode 100644 src/pybind/mgr/dashboard/plugins/ttl_cache.py create mode 100644 src/pybind/mgr/dashboard/requirements-extra.txt create mode 100644 src/pybind/mgr/dashboard/requirements-lint.txt create mode 100644 src/pybind/mgr/dashboard/requirements-test.txt create mode 100644 src/pybind/mgr/dashboard/requirements.txt create mode 100644 src/pybind/mgr/dashboard/rest_client.py create mode 100755 src/pybind/mgr/dashboard/run-backend-api-request.sh create mode 100755 src/pybind/mgr/dashboard/run-backend-api-tests.sh create mode 100755 src/pybind/mgr/dashboard/run-backend-rook-api-request.sh create mode 100755 src/pybind/mgr/dashboard/run-frontend-e2e-tests.sh create mode 100755 src/pybind/mgr/dashboard/run-frontend-unittests.sh create mode 100644 src/pybind/mgr/dashboard/security.py create mode 100644 src/pybind/mgr/dashboard/services/__init__.py create mode 100644 src/pybind/mgr/dashboard/services/_paginate.py create mode 100644 src/pybind/mgr/dashboard/services/access_control.py create mode 100644 src/pybind/mgr/dashboard/services/auth.py create mode 100644 src/pybind/mgr/dashboard/services/ceph_service.py create mode 100644 src/pybind/mgr/dashboard/services/cephfs.py create mode 100644 src/pybind/mgr/dashboard/services/cluster.py create mode 100644 src/pybind/mgr/dashboard/services/exception.py create mode 100644 src/pybind/mgr/dashboard/services/iscsi_cli.py create mode 100644 src/pybind/mgr/dashboard/services/iscsi_client.py create mode 100644 src/pybind/mgr/dashboard/services/iscsi_config.py create mode 100644 src/pybind/mgr/dashboard/services/orchestrator.py create mode 100644 src/pybind/mgr/dashboard/services/osd.py create mode 100644 src/pybind/mgr/dashboard/services/progress.py create mode 100644 src/pybind/mgr/dashboard/services/rbd.py create mode 100644 src/pybind/mgr/dashboard/services/rgw_client.py create mode 100644 src/pybind/mgr/dashboard/services/settings.py create mode 100644 src/pybind/mgr/dashboard/services/sso.py create mode 100644 src/pybind/mgr/dashboard/services/tcmu_service.py create mode 100644 src/pybind/mgr/dashboard/settings.py create mode 100644 src/pybind/mgr/dashboard/tests/__init__.py create mode 100644 src/pybind/mgr/dashboard/tests/helper.py create mode 100644 src/pybind/mgr/dashboard/tests/test_access_control.py create mode 100644 src/pybind/mgr/dashboard/tests/test_api_auditing.py create mode 100644 src/pybind/mgr/dashboard/tests/test_auth.py create mode 100644 src/pybind/mgr/dashboard/tests/test_cache.py create mode 100644 src/pybind/mgr/dashboard/tests/test_ceph_service.py create mode 100644 src/pybind/mgr/dashboard/tests/test_ceph_users.py create mode 100644 src/pybind/mgr/dashboard/tests/test_cephfs.py create mode 100644 src/pybind/mgr/dashboard/tests/test_cluster_upgrade.py create mode 100644 src/pybind/mgr/dashboard/tests/test_controllers.py create mode 100644 src/pybind/mgr/dashboard/tests/test_crud.py create mode 100644 
src/pybind/mgr/dashboard/tests/test_daemon.py create mode 100644 src/pybind/mgr/dashboard/tests/test_docs.py create mode 100644 src/pybind/mgr/dashboard/tests/test_erasure_code_profile.py create mode 100644 src/pybind/mgr/dashboard/tests/test_exceptions.py create mode 100644 src/pybind/mgr/dashboard/tests/test_feature_toggles.py create mode 100644 src/pybind/mgr/dashboard/tests/test_grafana.py create mode 100644 src/pybind/mgr/dashboard/tests/test_home.py create mode 100644 src/pybind/mgr/dashboard/tests/test_host.py create mode 100644 src/pybind/mgr/dashboard/tests/test_iscsi.py create mode 100644 src/pybind/mgr/dashboard/tests/test_nfs.py create mode 100644 src/pybind/mgr/dashboard/tests/test_notification.py create mode 100644 src/pybind/mgr/dashboard/tests/test_orchestrator.py create mode 100644 src/pybind/mgr/dashboard/tests/test_osd.py create mode 100644 src/pybind/mgr/dashboard/tests/test_plugin_debug.py create mode 100644 src/pybind/mgr/dashboard/tests/test_pool.py create mode 100644 src/pybind/mgr/dashboard/tests/test_prometheus.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rbd_mirroring.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rbd_service.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rest_client.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rest_tasks.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rgw.py create mode 100644 src/pybind/mgr/dashboard/tests/test_rgw_client.py create mode 100644 src/pybind/mgr/dashboard/tests/test_settings.py create mode 100644 src/pybind/mgr/dashboard/tests/test_ssl.py create mode 100644 src/pybind/mgr/dashboard/tests/test_sso.py create mode 100644 src/pybind/mgr/dashboard/tests/test_task.py create mode 100644 src/pybind/mgr/dashboard/tests/test_tools.py create mode 100644 src/pybind/mgr/dashboard/tests/test_versioning.py create mode 100644 src/pybind/mgr/dashboard/tools.py create mode 100644 src/pybind/mgr/dashboard/tox.ini create mode 100644 src/pybind/mgr/devicehealth/__init__.py create mode 100644 src/pybind/mgr/devicehealth/module.py create mode 100644 src/pybind/mgr/diskprediction_local/__init__.py create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/config.json create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_1.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_10.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_104.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_105.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_109.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_112.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_114.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_115.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_118.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_119.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_12.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_120.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_123.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_124.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_125.pkl create mode 100644 
src/pybind/mgr/diskprediction_local/models/prophetstor/svm_128.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_131.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_134.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_138.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_14.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_141.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_145.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_151.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_16.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_161.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_168.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_169.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_174.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_18.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_182.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_185.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_186.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_195.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_201.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_204.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_206.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_208.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_210.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_212.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_213.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_219.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_221.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_222.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_223.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_225.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_227.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_229.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_230.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_234.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_235.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_236.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_239.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_243.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_27.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_3.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_33.pkl create mode 100644 
src/pybind/mgr/diskprediction_local/models/prophetstor/svm_36.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_44.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_50.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_57.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_59.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_6.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_61.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_62.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_67.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_69.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_71.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_72.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_78.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_79.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_82.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_85.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_88.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_93.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/prophetstor/svm_97.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/redhat/config.json create mode 100644 src/pybind/mgr/diskprediction_local/models/redhat/hgst_predictor.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/redhat/hgst_scaler.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/redhat/seagate_predictor.pkl create mode 100644 src/pybind/mgr/diskprediction_local/models/redhat/seagate_scaler.pkl create mode 100644 src/pybind/mgr/diskprediction_local/module.py create mode 100644 src/pybind/mgr/diskprediction_local/predictor.py create mode 100644 src/pybind/mgr/diskprediction_local/requirements.txt create mode 100644 src/pybind/mgr/feedback/__init__.py create mode 100644 src/pybind/mgr/feedback/model.py create mode 100644 src/pybind/mgr/feedback/module.py create mode 100644 src/pybind/mgr/feedback/service.py create mode 100644 src/pybind/mgr/hello/__init__.py create mode 100644 src/pybind/mgr/hello/module.py create mode 100644 src/pybind/mgr/influx/__init__.py create mode 100644 src/pybind/mgr/influx/module.py create mode 100644 src/pybind/mgr/insights/__init__.py create mode 100644 src/pybind/mgr/insights/health.py create mode 100644 src/pybind/mgr/insights/module.py create mode 100644 src/pybind/mgr/insights/tests/__init__.py create mode 100644 src/pybind/mgr/insights/tests/test_health.py create mode 100644 src/pybind/mgr/iostat/__init__.py create mode 100644 src/pybind/mgr/iostat/module.py create mode 100644 src/pybind/mgr/k8sevents/README.md create mode 100644 src/pybind/mgr/k8sevents/__init__.py create mode 100644 src/pybind/mgr/k8sevents/module.py create mode 100644 src/pybind/mgr/k8sevents/rbac_sample.yaml create mode 100644 src/pybind/mgr/localpool/__init__.py create mode 100644 src/pybind/mgr/localpool/module.py create mode 100644 src/pybind/mgr/mds_autoscaler/__init__.py create mode 100644 src/pybind/mgr/mds_autoscaler/module.py create mode 100644 
src/pybind/mgr/mds_autoscaler/tests/__init__.py create mode 100644 src/pybind/mgr/mds_autoscaler/tests/test_autoscaler.py create mode 100644 src/pybind/mgr/mgr_module.py create mode 100644 src/pybind/mgr/mgr_util.py create mode 100644 src/pybind/mgr/mirroring/__init__.py create mode 100644 src/pybind/mgr/mirroring/fs/__init__.py create mode 100644 src/pybind/mgr/mirroring/fs/blocklist.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/__init__.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/create.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/load.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/policy.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/state_transition.py create mode 100644 src/pybind/mgr/mirroring/fs/dir_map/update.py create mode 100644 src/pybind/mgr/mirroring/fs/exception.py create mode 100644 src/pybind/mgr/mirroring/fs/notify.py create mode 100644 src/pybind/mgr/mirroring/fs/snapshot_mirror.py create mode 100644 src/pybind/mgr/mirroring/fs/utils.py create mode 100644 src/pybind/mgr/mirroring/module.py create mode 100644 src/pybind/mgr/nfs/__init__.py create mode 100644 src/pybind/mgr/nfs/cluster.py create mode 100644 src/pybind/mgr/nfs/exception.py create mode 100644 src/pybind/mgr/nfs/export.py create mode 100644 src/pybind/mgr/nfs/ganesha_conf.py create mode 100644 src/pybind/mgr/nfs/module.py create mode 100644 src/pybind/mgr/nfs/tests/__init__.py create mode 100644 src/pybind/mgr/nfs/tests/test_nfs.py create mode 100644 src/pybind/mgr/nfs/utils.py create mode 100644 src/pybind/mgr/object_format.py create mode 100644 src/pybind/mgr/orchestrator/README.md create mode 100644 src/pybind/mgr/orchestrator/__init__.py create mode 100644 src/pybind/mgr/orchestrator/_interface.py create mode 100644 src/pybind/mgr/orchestrator/module.py create mode 100644 src/pybind/mgr/orchestrator/tests/__init__.py create mode 100644 src/pybind/mgr/orchestrator/tests/test_orchestrator.py create mode 100644 src/pybind/mgr/osd_perf_query/__init__.py create mode 100644 src/pybind/mgr/osd_perf_query/module.py create mode 100644 src/pybind/mgr/osd_support/__init__.py create mode 100644 src/pybind/mgr/osd_support/module.py create mode 100644 src/pybind/mgr/pg_autoscaler/__init__.py create mode 100644 src/pybind/mgr/pg_autoscaler/module.py create mode 100644 src/pybind/mgr/pg_autoscaler/tests/__init__.py create mode 100644 src/pybind/mgr/pg_autoscaler/tests/test_cal_final_pg_target.py create mode 100644 src/pybind/mgr/pg_autoscaler/tests/test_cal_ratio.py create mode 100644 src/pybind/mgr/pg_autoscaler/tests/test_overlapping_roots.py create mode 100644 src/pybind/mgr/progress/__init__.py create mode 100644 src/pybind/mgr/progress/module.py create mode 100644 src/pybind/mgr/progress/test_progress.py create mode 100644 src/pybind/mgr/prometheus/__init__.py create mode 100644 src/pybind/mgr/prometheus/module.py create mode 100644 src/pybind/mgr/prometheus/test_module.py create mode 100644 src/pybind/mgr/rbd_support/__init__.py create mode 100644 src/pybind/mgr/rbd_support/common.py create mode 100644 src/pybind/mgr/rbd_support/mirror_snapshot_schedule.py create mode 100644 src/pybind/mgr/rbd_support/module.py create mode 100644 src/pybind/mgr/rbd_support/perf.py create mode 100644 src/pybind/mgr/rbd_support/schedule.py create mode 100644 src/pybind/mgr/rbd_support/task.py create mode 100644 src/pybind/mgr/rbd_support/trash_purge_schedule.py create mode 100644 src/pybind/mgr/requirements-required.txt create mode 100644 src/pybind/mgr/requirements.txt 
create mode 100644 src/pybind/mgr/restful/__init__.py create mode 100644 src/pybind/mgr/restful/api/__init__.py create mode 100644 src/pybind/mgr/restful/api/config.py create mode 100644 src/pybind/mgr/restful/api/crush.py create mode 100644 src/pybind/mgr/restful/api/doc.py create mode 100644 src/pybind/mgr/restful/api/mon.py create mode 100644 src/pybind/mgr/restful/api/osd.py create mode 100644 src/pybind/mgr/restful/api/perf.py create mode 100644 src/pybind/mgr/restful/api/pool.py create mode 100644 src/pybind/mgr/restful/api/request.py create mode 100644 src/pybind/mgr/restful/api/server.py create mode 100644 src/pybind/mgr/restful/common.py create mode 100644 src/pybind/mgr/restful/context.py create mode 100644 src/pybind/mgr/restful/decorators.py create mode 100644 src/pybind/mgr/restful/hooks.py create mode 100644 src/pybind/mgr/restful/module.py create mode 100644 src/pybind/mgr/rgw/__init__.py create mode 100644 src/pybind/mgr/rgw/module.py create mode 100644 src/pybind/mgr/rook/.gitignore create mode 100644 src/pybind/mgr/rook/CMakeLists.txt create mode 100644 src/pybind/mgr/rook/__init__.py create mode 100644 src/pybind/mgr/rook/ci/Dockerfile create mode 100755 src/pybind/mgr/rook/ci/run-rook-e2e-tests.sh create mode 100755 src/pybind/mgr/rook/ci/scripts/bootstrap-rook-cluster.sh create mode 100644 src/pybind/mgr/rook/ci/tests/features/rook.feature create mode 100644 src/pybind/mgr/rook/ci/tests/features/steps/implementation.py create mode 100644 src/pybind/mgr/rook/ci/tests/features/steps/utils.py create mode 100755 src/pybind/mgr/rook/generate_rook_ceph_client.sh create mode 100644 src/pybind/mgr/rook/module.py create mode 100644 src/pybind/mgr/rook/requirements.txt create mode 100644 src/pybind/mgr/rook/rook-client-python/.github/workflows/generate.yml create mode 100644 src/pybind/mgr/rook/rook-client-python/.gitignore create mode 100644 src/pybind/mgr/rook/rook-client-python/LICENSE create mode 100644 src/pybind/mgr/rook/rook-client-python/README.md create mode 100644 src/pybind/mgr/rook/rook-client-python/conftest.py create mode 100755 src/pybind/mgr/rook/rook-client-python/generate.sh create mode 100644 src/pybind/mgr/rook/rook-client-python/generate_model_classes.py create mode 100644 src/pybind/mgr/rook/rook-client-python/mypy.ini create mode 100644 src/pybind/mgr/rook/rook-client-python/requirements.txt create mode 100644 src/pybind/mgr/rook/rook-client-python/rook-python-client-demo.gif create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/__init__.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/_helper.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/_helper.py.orig create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/cassandra/__init__.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/cassandra/cluster.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/__init__.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephblockpool.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephclient.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephcluster.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephfilesystem.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephfilesystemmirror.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephnfs.py create mode 100644 
src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephobjectrealm.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephobjectstore.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephobjectstoreuser.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephobjectzone.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephobjectzonegroup.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/cephrbdmirror.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/objectbucket.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/objectbucketclaim.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/volume.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/volumereplication.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/ceph/volumereplicationclass.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/py.typed create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/tests/__init__.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/tests/test_README.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/tests/test_examples.py create mode 100644 src/pybind/mgr/rook/rook-client-python/rook_client/tests/test_properties.py create mode 100644 src/pybind/mgr/rook/rook-client-python/setup.py create mode 100644 src/pybind/mgr/rook/rook-client-python/tox.ini create mode 100644 src/pybind/mgr/rook/rook_client/__init__.py create mode 100644 src/pybind/mgr/rook/rook_client/_helper.py create mode 100644 src/pybind/mgr/rook/rook_cluster.py create mode 100644 src/pybind/mgr/rook/tests/__init__.py create mode 100644 src/pybind/mgr/rook/tests/fixtures.py create mode 100644 src/pybind/mgr/rook/tests/test_placement.py create mode 100644 src/pybind/mgr/rook/tests/test_rook.py create mode 100644 src/pybind/mgr/selftest/__init__.py create mode 100644 src/pybind/mgr/selftest/module.py create mode 100644 src/pybind/mgr/snap_schedule/.gitignore create mode 100644 src/pybind/mgr/snap_schedule/__init__.py create mode 100644 src/pybind/mgr/snap_schedule/fs/__init__.py create mode 100644 src/pybind/mgr/snap_schedule/fs/schedule.py create mode 100644 src/pybind/mgr/snap_schedule/fs/schedule_client.py create mode 100644 src/pybind/mgr/snap_schedule/module.py create mode 100644 src/pybind/mgr/snap_schedule/requirements.txt create mode 100644 src/pybind/mgr/snap_schedule/tests/__init__.py create mode 100644 src/pybind/mgr/snap_schedule/tests/conftest.py create mode 100644 src/pybind/mgr/snap_schedule/tests/fs/__init__.py create mode 100644 src/pybind/mgr/snap_schedule/tests/fs/test_schedule.py create mode 100644 src/pybind/mgr/snap_schedule/tests/fs/test_schedule_client.py create mode 100644 src/pybind/mgr/snap_schedule/tox.ini create mode 100644 src/pybind/mgr/stats/__init__.py create mode 100644 src/pybind/mgr/stats/fs/__init__.py create mode 100644 src/pybind/mgr/stats/fs/perf_stats.py create mode 100644 src/pybind/mgr/stats/module.py create mode 100644 src/pybind/mgr/status/__init__.py create mode 100644 src/pybind/mgr/status/module.py create mode 100644 src/pybind/mgr/telegraf/__init__.py create mode 100644 src/pybind/mgr/telegraf/basesocket.py create mode 100644 src/pybind/mgr/telegraf/module.py create mode 100644 src/pybind/mgr/telegraf/protocol.py create mode 100644 
src/pybind/mgr/telegraf/utils.py create mode 100644 src/pybind/mgr/telemetry/__init__.py create mode 100644 src/pybind/mgr/telemetry/module.py create mode 100644 src/pybind/mgr/telemetry/tests/__init__.py create mode 100644 src/pybind/mgr/telemetry/tests/test_telemetry.py create mode 100644 src/pybind/mgr/telemetry/tox.ini create mode 100644 src/pybind/mgr/test_orchestrator/README.md create mode 100644 src/pybind/mgr/test_orchestrator/__init__.py create mode 100644 src/pybind/mgr/test_orchestrator/dummy_data.json create mode 100644 src/pybind/mgr/test_orchestrator/module.py create mode 100644 src/pybind/mgr/tests/__init__.py create mode 100644 src/pybind/mgr/tests/test_mgr_util.py create mode 100644 src/pybind/mgr/tests/test_object_format.py create mode 100644 src/pybind/mgr/tests/test_tls.py create mode 100644 src/pybind/mgr/tox.ini create mode 100644 src/pybind/mgr/volumes/__init__.py create mode 100644 src/pybind/mgr/volumes/fs/__init__.py create mode 100644 src/pybind/mgr/volumes/fs/async_cloner.py create mode 100644 src/pybind/mgr/volumes/fs/async_job.py create mode 100644 src/pybind/mgr/volumes/fs/exception.py create mode 100644 src/pybind/mgr/volumes/fs/fs_util.py create mode 100644 src/pybind/mgr/volumes/fs/operations/__init__.py create mode 100644 src/pybind/mgr/volumes/fs/operations/access.py create mode 100644 src/pybind/mgr/volumes/fs/operations/clone_index.py create mode 100644 src/pybind/mgr/volumes/fs/operations/group.py create mode 100644 src/pybind/mgr/volumes/fs/operations/index.py create mode 100644 src/pybind/mgr/volumes/fs/operations/lock.py create mode 100644 src/pybind/mgr/volumes/fs/operations/pin_util.py create mode 100644 src/pybind/mgr/volumes/fs/operations/rankevicter.py create mode 100644 src/pybind/mgr/volumes/fs/operations/resolver.py create mode 100644 src/pybind/mgr/volumes/fs/operations/snapshot_util.py create mode 100644 src/pybind/mgr/volumes/fs/operations/subvolume.py create mode 100644 src/pybind/mgr/volumes/fs/operations/template.py create mode 100644 src/pybind/mgr/volumes/fs/operations/trash.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/__init__.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/auth_metadata.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/op_sm.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/subvolume_attrs.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py create mode 100644 src/pybind/mgr/volumes/fs/operations/versions/subvolume_v2.py create mode 100644 src/pybind/mgr/volumes/fs/operations/volume.py create mode 100644 src/pybind/mgr/volumes/fs/purge_queue.py create mode 100644 src/pybind/mgr/volumes/fs/vol_spec.py create mode 100644 src/pybind/mgr/volumes/fs/volume.py create mode 100644 src/pybind/mgr/volumes/module.py create mode 100644 src/pybind/mgr/zabbix/__init__.py create mode 100644 src/pybind/mgr/zabbix/module.py create mode 100644 src/pybind/mgr/zabbix/zabbix_template.xml create mode 100644 src/pybind/rados/CMakeLists.txt create mode 100644 src/pybind/rados/MANIFEST.in create mode 100644 src/pybind/rados/c_rados.pxd create mode 100644 src/pybind/rados/ctime.pxd create mode 100644 src/pybind/rados/mock_rados.pxi create mode 100644 src/pybind/rados/rados.pxd create mode 100644 src/pybind/rados/rados.pyx create mode 100755 src/pybind/rados/setup.py 
create mode 100644 src/pybind/rbd/CMakeLists.txt create mode 100644 src/pybind/rbd/MANIFEST.in create mode 100644 src/pybind/rbd/c_rbd.pxd create mode 100644 src/pybind/rbd/ctime.pxd create mode 100644 src/pybind/rbd/mock_rbd.pxi create mode 100644 src/pybind/rbd/rbd.pyx create mode 100755 src/pybind/rbd/setup.py create mode 100644 src/pybind/rgw/CMakeLists.txt create mode 100644 src/pybind/rgw/MANIFEST.in create mode 100644 src/pybind/rgw/c_rgw.pxd create mode 100644 src/pybind/rgw/cstat.pxd create mode 100644 src/pybind/rgw/mock_rgw.pxi create mode 100644 src/pybind/rgw/rgw.pyx create mode 100755 src/pybind/rgw/setup.py create mode 100644 src/pybind/tox.ini (limited to 'src/pybind') diff --git a/src/pybind/CMakeLists.txt b/src/pybind/CMakeLists.txt new file mode 100644 index 000000000..b01c49b62 --- /dev/null +++ b/src/pybind/CMakeLists.txt @@ -0,0 +1,80 @@ +include(Distutils) + +set(CYTHON_MODULE_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/cython_modules) + +find_package(Cython REQUIRED) + +add_subdirectory(rados) +add_custom_target(cython_modules ALL + DEPENDS cython_rados) + +if(WITH_RBD) + add_subdirectory(rbd) + add_dependencies(cython_modules cython_rbd) +endif() +if(WITH_CEPHFS) + add_subdirectory(cephfs) + add_dependencies(cython_modules cython_cephfs) +endif() +if(WITH_RADOSGW) + add_subdirectory(rgw) + add_dependencies(cython_modules cython_rgw) +endif() + +# if CMAKE_INSTALL_PREFIX is an empty string, must replace +# it with "/" to make PYTHON_INSTALL_TEMPLATE an absolute path to be +# consistent with all other installation paths. +if(CMAKE_INSTALL_PREFIX) + set(PYTHON_INSTALL_TEMPLATE "${CMAKE_INSTALL_PREFIX}") +else(CMAKE_INSTALL_PREFIX) + set(PYTHON_INSTALL_TEMPLATE "/") +endif(CMAKE_INSTALL_PREFIX) + +execute_process( + COMMAND + ${Python3_EXECUTABLE} -c + "import sysconfig;\ + print(\ + sysconfig.get_path(\ + scheme='deb_system' in sysconfig.get_scheme_names() and 'deb_system' or 'posix_prefix',\ + name='purelib',\ + vars=\ + {'base': '${PYTHON_INSTALL_TEMPLATE}',\ + 'py_version_short': sysconfig.get_config_var('py_version_short')}))" + OUTPUT_VARIABLE "PYTHON3_INSTDIR" + OUTPUT_STRIP_TRAILING_WHITESPACE) + +install(FILES + ceph_argparse.py + ceph_daemon.py + DESTINATION ${PYTHON3_INSTDIR}) + +if(WITH_MGR) + execute_process( + COMMAND ${Python3_EXECUTABLE} -c "import ssl; print('.'.join(map(str,ssl.OPENSSL_VERSION_INFO[0:3])))" + RESULT_VARIABLE PYSSL_RESULT + OUTPUT_VARIABLE PYSSL_VER + ERROR_QUIET) + if (NOT ${PYSSL_RESULT}) + # the ideal way to get the soversion is to parse the suffix of file name of + # `/lib/x86_64-linux-gnu/libssl.so.1.0.0`, but since we're lazy, and will just + # trust the version number here. + macro(get_openssl_soversion version prefix) + string(REPLACE "." 
";" ssl_version_list ${version}) + list(GET ssl_version_list 0 ssl_version_major) + list(GET ssl_version_list 1 ssl_version_minor) + set(${prefix}_SOVERSION ${ssl_version_major}.${ssl_version_minor}) + unset(ssl_version_list) + unset(ssl_version_major) + unset(ssl_version_minor) + endmacro() + get_openssl_soversion(${OPENSSL_VERSION} OPENSSL) + get_openssl_soversion(${PYSSL_VER} PYSSL) + if(NOT (OPENSSL_SOVERSION VERSION_EQUAL PYSSL_SOVERSION)) + message(FATAL_ERROR "Python and Ceph link to different OpenSSL versions: ${PYSSL_VER} vs ${OPENSSL_VERSION}") + endif() + else() + message(WARNING "could not determine ssl version of python crypto lib") + endif() + add_subdirectory(mgr) +endif(WITH_MGR) diff --git a/src/pybind/ceph_argparse.py b/src/pybind/ceph_argparse.py new file mode 100644 index 000000000..b4aace3df --- /dev/null +++ b/src/pybind/ceph_argparse.py @@ -0,0 +1,1707 @@ +""" +Types and routines used by the ceph CLI as well as the RESTful +interface. These have to do with querying the daemons for +command-description information, validating user command input against +those descriptions, and submitting the command to the appropriate +daemon. + +Copyright (C) 2013 Inktank Storage, Inc. + +LGPL-2.1 or LGPL-3.0. See file COPYING. +""" +import copy +import enum +import math +import json +import os +import pprint +import re +import socket +import stat +import sys +import threading +import uuid + +from collections import abc +from typing import cast, Any, Callable, Dict, Generic, List, Optional, Sequence, Tuple, Union + +if sys.version_info >= (3, 8): + from typing import get_args, get_origin +else: + def get_args(tp): + if tp is Generic: + return tp + else: + return getattr(tp, '__args__', ()) + + def get_origin(tp): + return getattr(tp, '__origin__', None) + + +# Flags are from MonCommand.h +class Flag: + NOFORWARD = (1 << 0) + OBSOLETE = (1 << 1) + DEPRECATED = (1 << 2) + MGR = (1 << 3) + POLL = (1 << 4) + HIDDEN = (1 << 5) + + +KWARG_EQUALS = "--([^=]+)=(.+)" +KWARG_SPACE = "--([^=]+)" + +try: + basestring +except NameError: + basestring = str + + +class ArgumentError(Exception): + """ + Something wrong with arguments + """ + pass + + +class ArgumentNumber(ArgumentError): + """ + Wrong number of a repeated argument + """ + pass + + +class ArgumentFormat(ArgumentError): + """ + Argument value has wrong format + """ + pass + + +class ArgumentMissing(ArgumentError): + """ + Argument value missing in a command + """ + pass + + +class ArgumentValid(ArgumentError): + """ + Argument value is otherwise invalid (doesn't match choices, for instance) + """ + pass + + +class ArgumentTooFew(ArgumentError): + """ + Fewer arguments than descriptors in signature; may mean to continue + the search, so gets a special exception type + """ + + +class ArgumentPrefix(ArgumentError): + """ + Special for mismatched prefix; less severe, don't report by default + """ + pass + + +class JsonFormat(Exception): + """ + some syntactic or semantic issue with the JSON + """ + pass + + +class CephArgtype(object): + """ + Base class for all Ceph argument types + + Instantiating an object sets any validation parameters + (allowable strings, numeric ranges, etc.). The 'valid' + method validates a string against that initialized instance, + throwing ArgumentError if there's a problem. 
+ """ + def __init__(self, **kwargs): + """ + set any per-instance validation parameters here + from kwargs (fixed string sets, integer ranges, etc) + """ + pass + + def valid(self, s, partial=False): + """ + Run validation against given string s (generally one word); + partial means to accept partial string matches (begins-with). + If cool, set self.val to the value that should be returned + (a copy of the input string, or a numeric or boolean interpretation + thereof, for example) + if not, throw ArgumentError(msg-as-to-why) + """ + self.val = s + + def __repr__(self): + """ + return string representation of description of type. Note, + this is not a representation of the actual value. Subclasses + probably also override __str__() to give a more user-friendly + 'name/type' description for use in command format help messages. + """ + return self.__class__.__name__ + + def __str__(self): + """ + where __repr__ (ideally) returns a string that could be used to + reproduce the object, __str__ returns one you'd like to see in + print messages. Use __str__ to format the argtype descriptor + as it would be useful in a command usage message. + """ + return '<{0}>'.format(self.__class__.__name__) + + def __call__(self, v): + return v + + def complete(self, s): + return [] + + @staticmethod + def _compound_type_to_argdesc(tp, attrs, positional): + # generate argdesc from Sequence[T], Tuple[T,..] and Optional[T] + orig_type = get_origin(tp) + type_args = get_args(tp) + if orig_type in (abc.Sequence, Sequence, List, list): + assert len(type_args) == 1 + attrs['n'] = 'N' + return CephArgtype.to_argdesc(type_args[0], attrs, positional=positional) + elif orig_type is Tuple: + assert len(type_args) >= 1 + inner_tp = type_args[0] + assert type_args.count(inner_tp) == len(type_args), \ + f'all elements in {tp} should be identical' + attrs['n'] = str(len(type_args)) + return CephArgtype.to_argdesc(inner_tp, attrs, positional=positional) + elif get_origin(tp) is Union: + # should be Union[t, NoneType] + assert len(type_args) == 2 and isinstance(None, type_args[1]) + return CephArgtype.to_argdesc(type_args[0], attrs, True, positional) + else: + raise ValueError(f"unknown type '{tp}': '{attrs}'") + + @staticmethod + def to_argdesc(tp, attrs, has_default=False, positional=True): + if has_default: + attrs['req'] = 'false' + if not positional: + attrs['positional'] = 'false' + CEPH_ARG_TYPES = { + str: CephString, + int: CephInt, + float: CephFloat, + bool: CephBool + } + try: + return CEPH_ARG_TYPES[tp]().argdesc(attrs) + except KeyError: + if isinstance(tp, CephArgtype): + return tp.argdesc(attrs) + elif isinstance(tp, type) and issubclass(tp, enum.Enum): + return CephChoices(tp=tp).argdesc(attrs) + else: + return CephArgtype._compound_type_to_argdesc(tp, attrs, positional) + + def argdesc(self, attrs): + attrs['type'] = type(self).__name__ + return ','.join(f'{k}={v}' for k, v in attrs.items()) + + @staticmethod + def _cast_to_compound_type(tp, v): + orig_type = get_origin(tp) + type_args = get_args(tp) + if orig_type in (abc.Sequence, Sequence, List, list): + if v is None: + return None + return [CephArgtype.cast_to(type_args[0], e) for e in v] + elif orig_type is Tuple: + return tuple(CephArgtype.cast_to(type_args[0], e) for e in v) + elif get_origin(tp) is Union: + # should be Union[t, NoneType] + assert len(type_args) == 2 and isinstance(None, type_args[1]) + return CephArgtype.cast_to(type_args[0], v) + else: + raise ValueError(f"unknown type '{tp}': '{v}'") + + @staticmethod + def cast_to(tp, v): + 
+        PYTHON_TYPES = (
+            str,
+            int,
+            float,
+            bool
+        )
+        if tp in PYTHON_TYPES:
+            return tp(v)
+        elif isinstance(tp, type) and issubclass(tp, enum.Enum):
+            return tp(v)
+        else:
+            return CephArgtype._cast_to_compound_type(tp, v)
+
+
+class CephInt(CephArgtype):
+    """
+    range-limited integers, [+|-][0-9]+ or 0x[0-9a-f]+
+    range: list of 1 or 2 ints, [min] or [min,max]
+    """
+    def __init__(self, range=''):
+        if range == '':
+            self.range = list()
+        else:
+            self.range = [int(x) for x in range.split('|')]
+
+    def valid(self, s, partial=False):
+        try:
+            val = int(s, 0)
+        except ValueError:
+            raise ArgumentValid("{0} doesn't represent an int".format(s))
+        if len(self.range) == 2:
+            if val < self.range[0] or val > self.range[1]:
+                raise ArgumentValid(f"{val} not in range {self.range}")
+        elif len(self.range) == 1:
+            if val < self.range[0]:
+                raise ArgumentValid(f"{val} not in range {self.range}")
+        self.val = val
+
+    def __str__(self):
+        r = ''
+        if len(self.range) == 1:
+            r = '[{0}-]'.format(self.range[0])
+        if len(self.range) == 2:
+            r = '[{0}-{1}]'.format(self.range[0], self.range[1])
+
+        return '<int{0}>'.format(r)
+
+    def argdesc(self, attrs):
+        if self.range:
+            attrs['range'] = '|'.join(str(v) for v in self.range)
+        return super().argdesc(attrs)
+
+
+class CephFloat(CephArgtype):
+    """
+    range-limited float type
+    range: list of 1 or 2 floats, [min] or [min, max]
+    """
+    def __init__(self, range=''):
+        if range == '':
+            self.range = list()
+        else:
+            self.range = [float(x) for x in range.split('|')]
+
+    def valid(self, s, partial=False):
+        try:
+            val = float(s)
+        except ValueError:
+            raise ArgumentValid("{0} doesn't represent a float".format(s))
+        if len(self.range) == 2:
+            if val < self.range[0] or val > self.range[1]:
+                raise ArgumentValid(f"{val} not in range {self.range}")
+        elif len(self.range) == 1:
+            if val < self.range[0]:
+                raise ArgumentValid(f"{val} not in range {self.range}")
+        self.val = val
+
+    def __str__(self):
+        r = ''
+        if len(self.range) == 1:
+            r = '[{0}-]'.format(self.range[0])
+        if len(self.range) == 2:
+            r = '[{0}-{1}]'.format(self.range[0], self.range[1])
+        return '<float{0}>'.format(r)
+
+    def argdesc(self, attrs):
+        if self.range:
+            attrs['range'] = '|'.join(str(v) for v in self.range)
+        return super().argdesc(attrs)
+
+
+class CephString(CephArgtype):
+    """
+    String; pretty generic. goodchars is a RE char class of valid chars
+    """
+    def __init__(self, goodchars=''):
+        from string import printable
+        try:
+            re.compile(goodchars)
+        except re.error:
+            raise ValueError('CephString(): "{0}" is not a valid RE'.
+                             format(goodchars))
+        self.goodchars = goodchars
+        self.goodset = frozenset(
+            [c for c in printable if re.match(goodchars, c)]
+        )
+
+    def valid(self, s: str, partial: bool = False) -> None:
+        sset = set(s)
+        if self.goodset and not sset <= self.goodset:
+            raise ArgumentFormat("invalid chars {0} in {1}".
+                                format(''.join(sset - self.goodset), s))
+        self.val = s
+
+    def __str__(self) -> str:
+        b = ''
+        if self.goodchars:
+            b += '(goodchars {0})'.format(self.goodchars)
+        return '<string{0}>'.format(b)
+
+    def complete(self, s) -> List[str]:
+        if s == '':
+            return []
+        else:
+            return [s]
+
+    def argdesc(self, attrs):
+        if self.goodchars:
+            attrs['goodchars'] = self.goodchars
+        return super().argdesc(attrs)
+
+
+class CephSocketpath(CephArgtype):
+    """
+    Admin socket path; check that it's readable and S_ISSOCK
+    """
+    def valid(self, s: str, partial: bool = False) -> None:
+        mode = os.stat(s).st_mode
+        if not stat.S_ISSOCK(mode):
+            raise ArgumentValid('socket path {0} is not a socket'.format(s))
+        self.val = s
+
+    def __str__(self) -> str:
+        return '<admin-socket-path>'
+
+
+class CephIPAddr(CephArgtype):
+    """
+    IP address (v4 or v6) with optional port
+    """
+    def valid(self, s, partial=False):
+        # parse off port, use socket to validate addr
+        type = 6
+        p: Optional[str] = None
+        if s.startswith('['):
+            type = 6
+        elif s.find('.') != -1:
+            type = 4
+        if type == 4:
+            port = s.find(':')
+            if port != -1:
+                a = s[:port]
+                p = s[port + 1:]
+                if int(p) > 65535:
+                    raise ArgumentValid('{0}: invalid IPv4 port'.format(p))
+            else:
+                a = s
+                p = None
+            try:
+                socket.inet_pton(socket.AF_INET, a)
+            except OSError:
+                raise ArgumentValid('{0}: invalid IPv4 address'.format(a))
+        else:
+            # v6
+            if s.startswith('['):
+                end = s.find(']')
+                if end == -1:
+                    raise ArgumentFormat('{0} missing terminating ]'.format(s))
+                if s[end + 1] == ':':
+                    try:
+                        p = s[end + 2:]
+                    except ValueError:
+                        raise ArgumentValid('{0}: bad port number'.format(s))
+                a = s[1:end]
+            else:
+                a = s
+                p = None
+            try:
+                socket.inet_pton(socket.AF_INET6, a)
+            except OSError:
+                raise ArgumentValid('{0} not valid IPv6 address'.format(s))
+        if p is not None and int(p) > 65535:
+            raise ArgumentValid("{0} not a valid port number".format(p))
+        self.val = s
+        self.addr = a
+        self.port = p
+
+    def __str__(self):
+        return '<IPaddr[:port]>'
+
+
+class CephEntityAddr(CephIPAddr):
+    """
+    EntityAddress, that is, IP address[/nonce]
+    """
+    def valid(self, s: str, partial: bool = False) -> None:
+        nonce = None
+        if '/' in s:
+            ip, nonce = s.split('/')
+        else:
+            ip = s
+        super(self.__class__, self).valid(ip)
+        if nonce:
+            nonce_int = None
+            try:
+                nonce_int = int(nonce)
+            except ValueError:
+                pass
+            if nonce_int is None or nonce_int < 0:
+                raise ArgumentValid(
+                    '{0}: invalid entity, nonce {1} not integer > 0'.
+                    format(s, nonce)
+                )
+        self.val = s
+
+    def __str__(self) -> str:
+        return '<EntityAddr>'
+
+
+class CephPoolname(CephArgtype):
+    """
+    Pool name; very little utility
+    """
+    def __str__(self) -> str:
+        return '<poolname>'
+
+
+class CephObjectname(CephArgtype):
+    """
+    Object name.
+    Maybe should be combined with Pool name as they're always
+    present in pairs, and then could be checked for presence
+    """
+    def __str__(self) -> str:
+        return '<objectname>'
+
+
+class CephPgid(CephArgtype):
+    """
+    pgid, in form N.xxx (N = pool number, xxx = hex pgnum)
+    """
+    def valid(self, s, partial=False):
+        if s.find('.') == -1:
+            raise ArgumentFormat('pgid has no .')
+        poolid_s, pgnum_s = s.split('.', 1)
+        try:
+            poolid = int(poolid_s)
+        except ValueError:
+            raise ArgumentFormat('pool {0} not integer'.format(poolid_s))
+        if poolid < 0:
+            raise ArgumentFormat('pool {0} < 0'.format(poolid))
+        try:
+            pgnum = int(pgnum_s, 16)
+        except ValueError:
+            raise ArgumentFormat('pgnum {0} not hex integer'.format(pgnum_s))
+        self.val = s
+
+    def __str__(self):
+        return '<pgid>'
+
+
+class CephName(CephArgtype):
+    """
+    Name (type.id) where:
+    type is osd|mon|client|mds
+    id is a base10 int, if type == osd, or a string otherwise
+
+    Also accept '*'
+    """
+    def __init__(self) -> None:
+        self.nametype: Optional[str] = None
+        self.nameid: Optional[str] = None
+
+    def valid(self, s, partial=False):
+        if s == '*':
+            self.val = s
+            return
+        elif s == "mgr":
+            self.nametype = "mgr"
+            self.val = s
+            return
+        elif s == "mon":
+            self.nametype = "mon"
+            self.val = s
+            return
+        if s.find('.') == -1:
+            raise ArgumentFormat('CephName: no . in {0}'.format(s))
+        else:
+            t, i = s.split('.', 1)
+            if t not in ('osd', 'mon', 'client', 'mds', 'mgr'):
+                raise ArgumentValid('unknown type ' + t)
+            if t == 'osd':
+                if i != '*':
+                    try:
+                        int(i)
+                    except ValueError:
+                        raise ArgumentFormat(f'osd id {i} not integer')
+            self.nametype = t
+        self.val = s
+        self.nameid = i
+
+    def __str__(self):
+        return '<type.id>'
+
+
+class CephOsdName(CephArgtype):
+    """
+    Like CephName, but specific to osds: allow <id> alone
+
+    osd.<id>, or <id>, or *, where id is a base10 int
+    """
+    def __init__(self):
+        self.nametype: Optional[str] = None
+        self.nameid: Optional[int] = None
+
+    def valid(self, s, partial=False):
+        if s == '*':
+            self.val = s
+            return
+        if s.find('.') != -1:
+            t, i = s.split('.', 1)
+            if t != 'osd':
+                raise ArgumentValid('unknown type ' + t)
+        else:
+            t = 'osd'
+            i = s
+        try:
+            v = int(i)
+        except ValueError:
+            raise ArgumentFormat(f'osd id {i} not integer')
+        if v < 0:
+            raise ArgumentFormat(f'osd id {v} is less than 0')
+        self.nametype = t
+        self.nameid = v
+        self.val = v
+
+    def __str__(self):
+        return '<osdname (id|osd.id)>'
+
+
+class CephChoices(CephArgtype):
+    """
+    Set of string literals; init with valid choices
+    """
+    def __init__(self, strings='', tp=None, **kwargs):
+        self.strings = strings.split('|')
+        self.enum = tp
+        if self.enum is not None:
+            self.strings = list(e.value for e in self.enum)
+
+    def valid(self, s, partial=False):
+        if not partial:
+            if s not in self.strings:
+                # show as __str__ does: {s1|s2..}
+                raise ArgumentValid("{0} not in {1}".format(s, self))
+            self.val = s
+            return
+
+        # partial
+        for t in self.strings:
+            if t.startswith(s):
+                self.val = s
+                return
+        raise ArgumentValid("{0} not in {1}".
+                            format(s, self))
+
+    def __str__(self):
+        if len(self.strings) == 1:
+            return '{0}'.format(self.strings[0])
+        else:
+            return '{0}'.format('|'.join(self.strings))
+
+    def __call__(self, v):
+        if self.enum is None:
+            return v
+        else:
+            return self.enum[v]
+
+    def complete(self, s):
+        all_elems = [token for token in self.strings if token.startswith(s)]
+        return all_elems
+
+    def argdesc(self, attrs):
+        attrs['strings'] = '|'.join(self.strings)
+        return super().argdesc(attrs)
+
+
+class CephBool(CephArgtype):
+    """
+    A boolean argument, values may be case insensitive 'true', 'false', '0',
+    '1'.  In keyword form, value may be left off (implies true).
+    """
+    def __init__(self, strings='', **kwargs):
+        self.strings = strings.split('|')
+
+    def valid(self, s, partial=False):
+        lower_case = s.lower()
+        if lower_case in ['true', '1']:
+            self.val = True
+        elif lower_case in ['false', '0']:
+            self.val = False
+        else:
+            raise ArgumentValid("{0} not one of 'true', 'false'".format(s))
+
+    def __str__(self):
+        return '<bool>'
+
+
+class CephFilepath(CephArgtype):
+    """
+    Openable file
+    """
+    def valid(self, s, partial=False):
+        # set self.val if the specified path is readable or writable
+        s = os.path.abspath(s)
+        if not os.access(s, os.R_OK):
+            self._validate_writable_file(s)
+        self.val = s
+
+    def _validate_writable_file(self, fname):
+        if os.path.exists(fname):
+            if os.path.isfile(fname):
+                if not os.access(fname, os.W_OK):
+                    raise ArgumentValid('{0} is not writable'.format(fname))
+            else:
+                raise ArgumentValid('{0} is not file'.format(fname))
+        else:
+            dirname = os.path.dirname(fname)
+            if not os.access(dirname, os.W_OK):
+                raise ArgumentValid('cannot create file in {0}'.format(dirname))
+
+    def __str__(self):
+        return '<outfilename>'
+
+
+class CephFragment(CephArgtype):
+    """
+    'Fragment' ??? XXX
+    """
+    def valid(self, s, partial=False):
+        if s.find('/') == -1:
+            raise ArgumentFormat('{0}: no /'.format(s))
+        val, bits = s.split('/')
+        # XXX is this right?
+        if not val.startswith('0x'):
+            raise ArgumentFormat("{0} not a hex integer".format(val))
+        try:
+            int(val)
+        except ValueError:
+            raise ArgumentFormat('can\'t convert {0} to integer'.format(val))
+        try:
+            int(bits)
+        except ValueError:
+            raise ArgumentFormat('can\'t convert {0} to integer'.format(bits))
+        self.val = s
+
+    def __str__(self):
+        return "<CephFS fragment ID (0xvvv/bbb)>"
+
+
+class CephUUID(CephArgtype):
+    """
+    CephUUID: pretty self-explanatory
+    """
+    def valid(self, s: str, partial: bool = False) -> None:
+        try:
+            uuid.UUID(s)
+        except Exception as e:
+            raise ArgumentFormat('invalid UUID {0}: {1}'.format(s, e))
+        self.val = s
+
+    def __str__(self) -> str:
+        return '<uuid>'
+
+
+class CephPrefix(CephArgtype):
+    """
+    CephPrefix: magic type for "all the first n fixed strings"
+    """
+    def __init__(self, prefix: str = '') -> None:
+        self.prefix = prefix
+
+    def valid(self, s: str, partial: bool = False) -> None:
+        try:
+            s = str(s)
+            if isinstance(s, bytes):
+                # `prefix` can always be converted into unicode when being compared,
+                # but `s` could be anything passed by user.
+                s = s.decode('ascii')
+        except UnicodeEncodeError:
+            raise ArgumentPrefix(u"no match for {0}".format(s))
+        except UnicodeDecodeError:
+            raise ArgumentPrefix("no match for {0}".format(s))
+
+        if partial:
+            if self.prefix.startswith(s):
+                self.val = s
+                return
+        else:
+            if s == self.prefix:
+                self.val = s
+                return
+
+        raise ArgumentPrefix("no match for {0}".format(s))
+
+    def __str__(self) -> str:
+        return self.prefix
+
+    def complete(self, s) -> List[str]:
+        if self.prefix.startswith(s):
+            return [self.prefix.rstrip(' ')]
+        else:
+            return []
+
+
+class argdesc(object):
+    """
+    argdesc(typename, name='name', n=numallowed|N,
+            req=False, positional=True,
+            helptext=helptext, **kwargs (type-specific))
+
+    validation rules:
+    typename: type(**kwargs) will be constructed
+    later, type.valid(w) will be called with a word in that position
+
+    name is used for parse errors and for constructing JSON output
+    n is a numeric literal or 'n|N', meaning "at least one, but maybe more"
+    req=False means the argument need not be present in the list
+    positional=False means the argument name must be specified, e.g. "--myoption value"
+    helptext is the associated help for the command
+    anything else are arguments to pass to the type constructor.
+
+    self.instance is an instance of type t constructed with typeargs.
+
+    valid() will later be called with input to validate against it,
+    and will store the validated value in self.instance.val for extraction.
+    """
+    def __init__(self, t, name=None, n=1, req=True, positional=True, **kwargs) -> None:
+        if isinstance(t, basestring):
+            self.t = CephPrefix
+            self.typeargs = {'prefix': t}
+            self.req = True
+            self.positional = True
+        else:
+            self.t = t
+            self.typeargs = kwargs
+            self.req = req in (True, 'True', 'true')
+            self.positional = positional in (True, 'True', 'true')
+            if not positional:
+                assert not req
+
+        self.name = name
+        self.N = (n in ['n', 'N'])
+        if self.N:
+            self.n = 1
+        else:
+            self.n = int(n)
+
+        self.numseen = 0
+
+        self.instance = self.t(**self.typeargs)
+
+    def __repr__(self):
+        r = 'argdesc(' + str(self.t) + ', '
+        internals = ['N', 'typeargs', 'instance', 't']
+        for (k, v) in self.__dict__.items():
+            if k.startswith('__') or k in internals:
+                pass
+            else:
+                # undo modification from __init__
+                if k == 'n' and self.N:
+                    v = 'N'
+                r += '{0}={1}, '.format(k, v)
+        for (k, v) in self.typeargs.items():
+            r += '{0}={1}, '.format(k, v)
+        return r[:-2] + ')'
+
+    def __str__(self):
+        if ((self.t == CephChoices and len(self.instance.strings) == 1)
+                or (self.t == CephPrefix)):
+            s = str(self.instance)
+        else:
+            s = '{0}({1})'.format(self.name, str(self.instance))
+        if self.N:
+            s += '...'
+        if not self.req:
+            s = '[' + s + ']'
+        return s
+
+    def helpstr(self):
+        """
+        like str(), but omit parameter names (except for CephString,
+        which really needs them)
+        """
+        if self.positional:
+            if self.t == CephBool:
+                chunk = "--{0}".format(self.name.replace("_", "-"))
+            elif self.t == CephPrefix:
+                chunk = str(self.instance)
+            elif self.t == CephChoices:
+                if self.name == 'format':
+                    # this is for talking to legacy clusters only; new clusters
+                    # should properly mark format args as non-positional.
+                    chunk = f'--{self.name} {{{str(self.instance)}}}'
+                else:
+                    chunk = f'<{self.name}:{self.instance}>'
+            elif self.t == CephOsdName:
+                # it just so happens all CephOsdName commands are named 'id' anyway,
+                # so <id|osd.id> is perfect.
+                chunk = '<id|osd.id>'
+            elif self.t == CephName:
+                # CephName commands similarly only have one arg of the
+                # type, so <type.id> is good.
+                chunk = '<type.id>'
+            elif self.t == CephInt:
+                chunk = '<{0}:int>'.format(self.name)
+            elif self.t == CephFloat:
+                chunk = '<{0}:float>'.format(self.name)
+            else:
+                chunk = '<{0}>'.format(self.name)
+            s = chunk
+            if self.N:
+                s += '...'
+            if not self.req:
+                s = '[' + s + ']'
+        else:
+            # non-positional
+            if self.t == CephBool:
+                chunk = "--{0}".format(self.name.replace("_", "-"))
+            elif self.t == CephPrefix:
+                chunk = str(self.instance)
+            elif self.t == CephChoices:
+                chunk = f'--{self.name} {{{str(self.instance)}}}'
+            elif self.t == CephOsdName:
+                chunk = f'--{self.name} <id|osd.id>'
+            elif self.t == CephName:
+                chunk = f'--{self.name} <type.id>'
+            elif self.t == CephInt:
+                chunk = f'--{self.name} <int>'
+            elif self.t == CephFloat:
+                chunk = f'--{self.name} <float>'
+            else:
+                chunk = f'--{self.name} <value>'
+            s = chunk
+            if self.N:
+                s += '...'
+            if not self.req:  # req should *always* be false
+                s = '[' + s + ']'
+
+        return s
+
+    def complete(self, s):
+        return self.instance.complete(s)
+
+
+def concise_sig(sig):
+    """
+    Return string representation of sig useful for syntax reference in help
+    """
+    return ' '.join([d.helpstr() for d in sig])
+
+
+def descsort_key(sh):
+    """
+    sort descriptors by prefixes, defined as the concatenation of all simple
+    strings in the descriptor; this works out to just the leading strings.
+    """
+    return concise_sig(sh['sig'])
+
+
+def parse_funcsig(sig: Sequence[Union[str, Dict[str, Any]]]) -> List[argdesc]:
+    """
+    parse a single descriptor (array of strings or dicts) into a
+    dict of function descriptor/validators (objects of CephXXX type)
+
+    :returns: list of ``argdesc``
+    """
+    newsig = []
+    argnum = 0
+    for desc in sig:
+        argnum += 1
+        if isinstance(desc, basestring):
+            t = CephPrefix
+            desc = {'type': t, 'name': 'prefix', 'prefix': desc}
+        else:
+            # not a simple string, must be dict
+            if 'type' not in desc:
+                s = 'JSON descriptor {0} has no type'.format(sig)
+                raise JsonFormat(s)
+            # look up type string in our globals() dict; if it's an
+            # object of type `type`, it must be a
+            # locally-defined class. otherwise, we haven't a clue.
+            if desc['type'] in globals():
+                t = globals()[desc['type']]
+                if not isinstance(t, type):
+                    s = 'unknown type {0}'.format(desc['type'])
+                    raise JsonFormat(s)
+            else:
+                s = 'unknown type {0}'.format(desc['type'])
+                raise JsonFormat(s)
+
+        kwargs = dict()
+        for key, val in desc.items():
+            if key not in ['type', 'name', 'n', 'req', 'positional']:
+                kwargs[key] = val
+        newsig.append(argdesc(t,
+                              name=desc.get('name', None),
+                              n=desc.get('n', 1),
+                              req=desc.get('req', True),
+                              positional=desc.get('positional', True),
+                              **kwargs))
+    return newsig
+
+
+def parse_json_funcsigs(s: str,
+                        consumer: str) -> Dict[str, Dict[str, List[argdesc]]]:
+    """
+    A function signature is mostly an array of argdesc; it's represented
+    in JSON as
+    {
+      "cmd001": {"sig":[ "type": type, "name": name, "n": num, "req":true|false <other param>], "help":helptext, "module":modulename, "perm":perms, "avail":availability}
+       .
+       .
+       .
+      ]
+
+    A set of sigs is in a dict mapped by a unique number:
+    {
+      "cmd1": {
+         "sig": ["type.. ], "help":helptext...
+      }
+      "cmd2": {
+         "sig": [.. ], "help":helptext...
+ } + } + + Parse the string s and return a dict of dicts, keyed by opcode; + each dict contains 'sig' with the array of descriptors, and 'help' + with the helptext, 'module' with the module name, 'perm' with a + string representing required permissions in that module to execute + this command (and also whether it is a read or write command from + the cluster state perspective), and 'avail' as a hint for + whether the command should be advertised by CLI, REST, or both. + If avail does not contain 'consumer', don't include the command + in the returned dict. + """ + try: + overall = json.loads(s) + except Exception as e: + print("Couldn't parse JSON {0}: {1}".format(s, e), file=sys.stderr) + raise e + sigdict = {} + for cmdtag, cmd in overall.items(): + if 'sig' not in cmd: + s = "JSON descriptor {0} has no 'sig'".format(cmdtag) + raise JsonFormat(s) + # check 'avail' and possibly ignore this command + if 'avail' in cmd: + if consumer not in cmd['avail']: + continue + # rewrite the 'sig' item with the argdesc-ized version, and... + cmd['sig'] = parse_funcsig(cmd['sig']) + # just take everything else as given + sigdict[cmdtag] = cmd + return sigdict + + +ArgValT = Union[bool, int, float, str, Tuple[str, str]] + +def validate_one(word: str, + desc, + is_kwarg: bool, + partial: bool = False) -> List[ArgValT]: + """ + validate_one(word, desc, is_kwarg, partial=False) + + validate word against the constructed instance of the type + in desc. May raise exception. If it returns false (and doesn't + raise an exception), desc.instance.val will + contain the validated value (in the appropriate type). + """ + vals = [] + # CephString option might contain "," in it + allow_csv = is_kwarg or desc.t is not CephString + if desc.N and allow_csv: + for part in word.split(','): + desc.instance.valid(part, partial) + vals.append(desc.instance.val) + else: + desc.instance.valid(word, partial) + vals.append(desc.instance.val) + desc.numseen += 1 + if desc.N: + desc.n = desc.numseen + 1 + return vals + + +def matchnum(args: List[str], + signature: List[argdesc], + partial: bool = False) -> int: + """ + matchnum(s, signature, partial=False) + + Returns number of arguments matched in s against signature. + Can be used to determine most-likely command for full or partial + matches (partial applies to string matches). + """ + words = args[:] + mysig = copy.deepcopy(signature) + matchcnt = 0 + for desc in mysig: + desc.numseen = 0 + while desc.numseen < desc.n: + # if there are no more arguments, return + if not words: + return matchcnt + word = words.pop(0) + + try: + # only allow partial matching if we're on the last supplied + # word; avoid matching foo bar and foot bar just because + # partial is set + validate_one(word, desc, False, partial and (len(words) == 0)) + valid = True + except ArgumentError: + # matchnum doesn't care about type of error + valid = False + + if not valid: + if not desc.req: + # this wasn't required, so word may match the next desc + words.insert(0, word) + break + else: + # it was required, and didn't match, return + return matchcnt + if desc.req: + matchcnt += 1 + return matchcnt + + +ValidatedArg = Union[bool, int, float, str, + Tuple[str, str], + Sequence[str]] +ValidatedArgs = Dict[str, ValidatedArg] + + +def store_arg(desc: argdesc, args: Sequence[ValidatedArg], d: ValidatedArgs): + ''' + Store argument described by, and held in, thanks to valid(), + desc into the dictionary d, keyed by desc.name. 
Three cases: + + 1) desc.N is set: use args for arg value in "d", desc.instance.val + only contains the last parsed arg in the "args" list + 2) prefix: multiple args are joined with ' ' into one d{} item + 3) single prefix or other arg: store as simple value + + Used in validate() below. + ''' + if desc.N: + # value should be a list + if desc.name in d: + d[desc.name] += args + else: + d[desc.name] = args + elif (desc.t == CephPrefix) and (desc.name in d): + # prefixes' values should be a space-joined concatenation + d[desc.name] += ' ' + desc.instance.val + else: + # if first CephPrefix or any other type, just set it + d[desc.name] = desc.instance.val + + +def validate(args: List[str], + signature: Sequence[argdesc], + flags: int = 0, + partial: Optional[bool] = False) -> ValidatedArgs: + """ + validate(args, signature, flags=0, partial=False) + + args is a list of strings representing a possible + command input following format of signature. Runs a validation; no + exception means it's OK. Return a dict containing all arguments keyed + by their descriptor name, with duplicate args per name accumulated + into a list (or space-separated value for CephPrefix). + + Mismatches of prefix are non-fatal, as this probably just means the + search hasn't hit the correct command. Mismatches of non-prefix + arguments are treated as fatal, and an exception raised. + + This matching is modified if partial is set: allow partial matching + (with partial dict returned); in this case, there are no exceptions + raised. + """ + + myargs = copy.deepcopy(args) + mysig = copy.deepcopy(signature) + reqsiglen = len([desc for desc in mysig if desc.req]) + matchcnt = 0 + d: ValidatedArgs = dict() + save_exception = None + + arg_descs_by_name: Dict[str, argdesc] = \ + dict((desc.name, desc) for desc in mysig if desc.t != CephPrefix) + + # Special case: detect "injectargs" (legacy way of modifying daemon + # configs) and permit "--" string arguments if so. + injectargs = myargs and myargs[0] == "injectargs" + + # Make a pass through all arguments + for desc in mysig: + desc.numseen = 0 + + while desc.numseen < desc.n: + if myargs: + myarg: Optional[str] = myargs.pop(0) + else: + myarg = None + + # no arg, but not required? Continue consuming mysig + # in case there are later required args + if myarg in (None, []): + if not desc.req: + break + # did we already get this argument (as a named arg, earlier?) + if desc.name in d: + break + + # A keyword argument? 
+ if myarg: + # argdesc for the keyword argument, if we find one + kwarg_desc = None + + # Try both styles of keyword argument + kwarg_match = re.match(KWARG_EQUALS, myarg) + if kwarg_match: + # We have a "--foo=bar" style argument + kwarg_k, kwarg_v = kwarg_match.groups() + + # Either "--foo-bar" or "--foo_bar" style is accepted + kwarg_k = kwarg_k.replace('-', '_') + + kwarg_desc = arg_descs_by_name.get(kwarg_k, None) + else: + # Maybe this is a "--foo bar" or "--bool" style argument + key_match = re.match(KWARG_SPACE, myarg) + if key_match: + kwarg_k = key_match.group(1) + + # Permit --foo-bar=123 form or --foo_bar=123 form, + # assuming all command definitions use foo_bar argument + # naming style + kwarg_k = kwarg_k.replace('-', '_') + + kwarg_desc = arg_descs_by_name.get(kwarg_k, None) + if kwarg_desc: + if kwarg_desc.t == CephBool: + kwarg_v = 'true' + elif len(myargs): # Some trailing arguments exist + kwarg_v = myargs.pop(0) + else: + # Forget it, this is not a valid kwarg + kwarg_desc = None + + if kwarg_desc: + args = validate_one(kwarg_v, kwarg_desc, True) + matchcnt += 1 + store_arg(kwarg_desc, args, d) + continue + + if not desc.positional: + # No more positional args! + raise ArgumentValid(f"Unexpected argument '{myarg}'") + + # Don't handle something as a positional argument if it + # has a leading "--" unless it's a CephChoices (used for + # "--yes-i-really-mean-it") + if myarg and myarg.startswith("--"): + # Special cases for instances of confirmation flags + # that were defined as CephString/CephChoices instead of CephBool + # in pre-nautilus versions of Ceph daemons. + is_value = desc.t == CephChoices \ + or myarg == "--yes-i-really-mean-it" \ + or myarg == "--yes-i-really-really-mean-it" \ + or myarg == "--yes-i-really-really-mean-it-not-faking" \ + or myarg == "--force" \ + or injectargs + + if not is_value: + # Didn't get caught by kwarg handling, but has a "--", so + # we must assume it's something invalid, to avoid naively + # passing through mis-typed options as the values of + # positional arguments. + raise ArgumentValid("Unexpected argument '{0}'".format( + myarg)) + + # out of arguments for a required param? + # Either return (if partial validation) or raise + if myarg in (None, []) and desc.req: + if desc.N and desc.numseen < 1: + # wanted N, didn't even get 1 + if partial: + return d + raise ArgumentNumber( + 'saw {0} of {1}, expected at least 1'. + format(desc.numseen, desc) + ) + elif not desc.N and desc.numseen < desc.n: + # wanted n, got too few + if partial: + return d + # special-case the "0 expected 1" case + if desc.numseen == 0 and desc.n == 1: + raise ArgumentMissing( + 'missing required parameter {0}'.format(desc) + ) + raise ArgumentNumber( + 'saw {0} of {1}, expected {2}'. + format(desc.numseen, desc, desc.n) + ) + break + + # Have an arg; validate it + assert myarg is not None + try: + args = validate_one(myarg, desc, False) + except ArgumentError as e: + # argument mismatch + if not desc.req: + # if not required, just push back; it might match + # the next arg + save_exception = [myarg, e] + myargs.insert(0, myarg) + break + else: + # hm, it was required, so time to return/raise + if partial: + return d + raise + + # Whew, valid arg acquired. 
Store in dict
+            matchcnt += 1
+            store_arg(desc, args, d)
+            # Clear prior exception
+            save_exception = None
+
+    # Done with entire list of argdescs
+    if matchcnt < reqsiglen:
+        raise ArgumentTooFew("not enough arguments given")
+
+    if myargs and not partial:
+        if save_exception:
+            print(save_exception[0], 'not valid: ', save_exception[1], file=sys.stderr)
+        raise ArgumentError("unused arguments: " + str(myargs))
+
+    if flags & Flag.MGR:
+        d['target'] = ('mon-mgr', '')
+
+    if flags & Flag.POLL:
+        d['poll'] = True
+
+    # Finally, success
+    return d
+
+
+def validate_command(sigdict: Dict[str, Dict[str, Any]],
+                     args: List[str],
+                     verbose: Optional[bool] = False) -> ValidatedArgs:
+    """
+    Parse positional arguments into a parameter dict, according to
+    the command descriptions.
+
+    Writes advice about nearly-matching commands to ``sys.stderr`` if
+    the arguments do not match any command.
+
+    :param sigdict: A command description dictionary, as returned
+                    from Ceph daemons by the get_command_descriptions
+                    command.
+    :param args: List of strings, should match one of the command
+                 signatures in ``sigdict``
+
+    :returns: A dict of parsed parameters (including ``prefix``),
+              or an empty dict if the args did not match any signature
+    """
+    if verbose:
+        print("validate_command: " + " ".join(args), file=sys.stderr)
+    found: Optional[Dict[str, Any]] = None
+    valid_dict = {}
+
+    # look for best match, accumulate possibles in bestcmds
+    # (so we can maybe give a more-useful error message)
+    best_match_cnt = 0.0
+    bestcmds: List[Dict[str, Any]] = []
+    for cmd in sigdict.values():
+        flags = cmd.get('flags', 0)
+        if flags & Flag.OBSOLETE:
+            continue
+        sig = cmd['sig']
+        matched: float = matchnum(args, sig, partial=True)
+        if (matched >= math.floor(best_match_cnt) and
+                matched == matchnum(args, sig, partial=False)):
+            # prefer those fully matched over partial match
+            matched += 0.5
+        if matched < best_match_cnt:
+            continue
+        if verbose:
+            print("better match: {0} > {1}: {2} ".format(
+                matched, best_match_cnt, concise_sig(sig)
+            ), file=sys.stderr)
+        if matched > best_match_cnt:
+            best_match_cnt = matched
+            bestcmds = [cmd]
+        else:
+            bestcmds.append(cmd)
+
+    # Sort bestcmds by number of req args so we can try shortest first
+    # (relies on a cmdsig being key,val where val is a list of len 1)
+
+    def grade(cmd):
+        # prefer optional arguments over required ones
+        sigs = cmd['sig']
+        return sum(map(lambda sig: sig.req, sigs))
+
+    bestcmds_sorted = sorted(bestcmds, key=grade)
+    if verbose:
+        print("bestcmds_sorted: ", file=sys.stderr)
+        pprint.PrettyPrinter(stream=sys.stderr).pprint(bestcmds_sorted)
+
+    ex: Optional[ArgumentError] = None
+    # for everything in bestcmds, look for a true match
+    for cmd in bestcmds_sorted:
+        sig = cmd['sig']
+        try:
+            valid_dict = validate(args, sig, flags=cmd.get('flags', 0))
+            found = cmd
+            break
+        except ArgumentPrefix:
+            # ignore prefix mismatches; we just haven't found
+            # the right command yet
+            pass
+        except ArgumentMissing as e:
+            ex = e
+            if len(bestcmds) == 1:
+                found = cmd
+            break
+        except ArgumentTooFew:
+            # It looked like this matched the beginning, but it
+            # didn't have enough args supplied.  If we're out of
+            # cmdsigs we'll fall out unfound; if we're not, maybe
+            # the next one matches completely.  Whine, but pass.
+            if verbose:
+                print('Not enough args supplied for ',
+                      concise_sig(sig), file=sys.stderr)
+        except ArgumentError as e:
+            ex = e
+            # Solid mismatch on an arg (type, range, etc.)
+ # Stop now, because we have the right command but + # some other input is invalid + found = cmd + break + + if found: + if not valid_dict: + print("Invalid command:", ex, file=sys.stderr) + print(concise_sig(sig), ': ', cmd['help'], file=sys.stderr) + else: + bestcmds = [c for c in bestcmds + if not c.get('flags', 0) & (Flag.DEPRECATED | Flag.HIDDEN)] + bestcmds = bestcmds[:10] # top 10 + print('no valid command found; {0} closest matches:'.format(len(bestcmds)), + file=sys.stderr) + for cmd in bestcmds: + print(concise_sig(cmd['sig']), file=sys.stderr) + return valid_dict + + +def find_cmd_target(childargs: List[str]) -> Tuple[str, Optional[str]]: + """ + Using a minimal validation, figure out whether the command + should be sent to a monitor or an osd. We do this before even + asking for the 'real' set of command signatures, so we can ask the + right daemon. + Returns ('osd', osdid), ('pg', pgid), ('mgr', '') or ('mon', '') + """ + sig = parse_funcsig(['tell', {'name': 'target', 'type': 'CephName'}]) + try: + valid_dict = validate(childargs, sig, partial=True) + except ArgumentError: + pass + else: + if len(valid_dict) == 2: + # revalidate to isolate type and id + name = CephName() + # if this fails, something is horribly wrong, as it just + # validated successfully above + name.valid(valid_dict['target']) + assert name.nametype is not None + return name.nametype, name.nameid + + sig = parse_funcsig(['tell', {'name': 'pgid', 'type': 'CephPgid'}]) + try: + valid_dict = validate(childargs, sig, partial=True) + except ArgumentError: + pass + else: + if len(valid_dict) == 2: + # pg doesn't need revalidation; the string is fine + pgid = valid_dict['pgid'] + assert isinstance(pgid, str) + return 'pg', pgid + + # If we reached this far it must mean that so far we've been unable to + # obtain a proper target from childargs. This may mean that we are not + # dealing with a 'tell' command, or that the specified target is invalid. + # If the latter, we likely were unable to catch it because we were not + # really looking for it: first we tried to parse a 'CephName' (osd, mon, + # mds, followed by and id); given our failure to parse, we tried to parse + # a 'CephPgid' instead (e.g., 0.4a). Considering we got this far though + # we were unable to do so. + # + # We will now check if this is a tell and, if so, forcefully validate the + # target as a 'CephName'. This must be so because otherwise we will end + # up sending garbage to a monitor, which is the default target when a + # target is not explicitly specified. + # e.g., + # 'ceph status' -> target is any one monitor + # 'ceph tell mon.* status -> target is all monitors + # 'ceph tell foo status -> target is invalid! 
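+    #
+    # For illustration (a sketch, assuming well-formed input and a
+    # connected cluster handle), the probing above and the fallback
+    # below resolve, for example:
+    #
+    #   find_cmd_target(['tell', 'osd.13', 'bench'])  -> ('osd', '13')
+    #   find_cmd_target(['pg', '0.4a', 'query'])      -> ('pg', '0.4a')
+    #   find_cmd_target(['status'])                   -> ('mon', '')
+    #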
+ if len(childargs) > 1 and childargs[0] == 'tell': + name = CephName() + # CephName.valid() raises on validation error; find_cmd_target()'s + # caller should handle them + name.valid(childargs[1]) + assert name.nametype is not None + assert name.nameid is not None + return name.nametype, name.nameid + + sig = parse_funcsig(['pg', {'name': 'pgid', 'type': 'CephPgid'}]) + try: + valid_dict = validate(childargs, sig, partial=True) + except ArgumentError: + pass + else: + if len(valid_dict) == 2: + pgid = valid_dict['pgid'] + assert isinstance(pgid, str) + return 'pg', pgid + + return 'mon', '' + + +class RadosThread(threading.Thread): + def __init__(self, func, *args, **kwargs): + self.args = args + self.kwargs = kwargs + self.func = func + self.exception = None + threading.Thread.__init__(self) + + def run(self): + try: + self.retval = self.func(*self.args, **self.kwargs) + except Exception as e: + self.exception = e + + +def run_in_thread(func: Callable[[Any, Any], Tuple[int, bytes, str]], + *args: Any, **kwargs: Any) -> Tuple[int, bytes, str]: + timeout = kwargs.pop('timeout', 0) + if timeout == 0 or timeout is None: + # python threading module will just get blocked if timeout is `None`, + # otherwise it will keep polling until timeout or thread stops. + # timeout in integer when converting it to nanoseconds, but since + # python3 uses `int64_t` for the deadline before timeout expires, + # we have to use a safe value which does not overflow after being + # added to current time in microseconds. + timeout = 24 * 60 * 60 + t = RadosThread(func, *args, **kwargs) + + # allow the main thread to exit (presumably, avoid a join() on this + # subthread) before this thread terminates. This allows SIGINT + # exit of a blocked call. See below. + t.daemon = True + + t.start() + t.join(timeout=timeout) + # ..but allow SIGINT to terminate the waiting. Note: this + # relies on the Linux kernel behavior of delivering the signal + # to the main thread in preference to any subthread (all that's + # strictly guaranteed is that *some* thread that has the signal + # unblocked will receive it). But there doesn't seem to be + # any interface to create a thread with SIGINT blocked. + if t.is_alive(): + raise Exception("timed out") + elif t.exception: + raise t.exception + else: + return t.retval + + +def send_command_retry(*args: Any, **kwargs: Any) -> Tuple[int, bytes, str]: + while True: + try: + return send_command(*args, **kwargs) + except Exception as e: + # If our librados instance has not reached state 'connected' + # yet, we'll see an exception like this and retry + if ('get_command_descriptions' in str(e) and + 'object in state configuring' in str(e)): + continue + else: + raise + + +def send_command(cluster, + target: Tuple[str, Optional[str]] = ('mon', ''), + cmd: Optional[str] = None, + inbuf: Optional[bytes] = b'', + timeout: Optional[int] = 0, + verbose: Optional[bool] = False) -> Tuple[int, bytes, str]: + """ + Send a command to a daemon using librados's + mon_command, osd_command, mgr_command, or pg_command. Any bulk input data + comes in inbuf. + + Returns (ret, outbuf, outs); ret is the return code, outbuf is + the outbl "bulk useful output" buffer, and outs is any status + or error message (intended for stderr). 
+
+    If target is osd.N, send command to that osd (except for pgid cmds)
+    """
+    try:
+        if target[0] == 'osd':
+            osdid = target[1]
+            assert osdid is not None
+
+            if verbose:
+                print('submit {0} to osd.{1}'.format(cmd, osdid),
+                      file=sys.stderr)
+            ret, outbuf, outs = run_in_thread(
+                cluster.osd_command, int(osdid), cmd, inbuf, timeout=timeout)
+
+        elif target[0] == 'mgr':
+            name = ''   # non-None empty string means "current active mgr"
+            if len(target) > 1 and target[1] is not None:
+                name = target[1]
+            if verbose:
+                print('submit {0} to {1} name {2}'.format(cmd, target[0], name),
+                      file=sys.stderr)
+            ret, outbuf, outs = run_in_thread(
+                cluster.mgr_command, cmd, inbuf, timeout=timeout, target=name)
+
+        elif target[0] == 'mon-mgr':
+            if verbose:
+                print('submit {0} to {1}'.format(cmd, target[0]),
+                      file=sys.stderr)
+            ret, outbuf, outs = run_in_thread(
+                cluster.mgr_command, cmd, inbuf, timeout=timeout)
+
+        elif target[0] == 'pg':
+            pgid = target[1]
+            # pgid will already be in the command for the 'pg <pgid>'
+            # form, but for 'tell <pgid>', we need to put it in
+            if cmd:
+                cmddict = json.loads(cmd)
+                cmddict['pgid'] = pgid
+            else:
+                cmddict = dict(pgid=pgid)
+            cmd = json.dumps(cmddict)
+            if verbose:
+                print('submit {0} for pgid {1}'.format(cmd, pgid),
+                      file=sys.stderr)
+            ret, outbuf, outs = run_in_thread(
+                cluster.pg_command, pgid, cmd, inbuf, timeout=timeout)
+
+        elif target[0] == 'mon':
+            if verbose:
+                print('{0} to {1}'.format(cmd, target[0]),
+                      file=sys.stderr)
+            if len(target) < 2 or target[1] == '':
+                ret, outbuf, outs = run_in_thread(
+                    cluster.mon_command, cmd, inbuf, timeout=timeout)
+            else:
+                ret, outbuf, outs = run_in_thread(
+                    cluster.mon_command, cmd, inbuf, timeout=timeout, target=target[1])
+        elif target[0] == 'mds':
+            mds_spec = target[1]
+
+            if verbose:
+                print('submit {0} to mds.{1}'.format(cmd, mds_spec),
+                      file=sys.stderr)
+
+            try:
+                from cephfs import LibCephFS
+            except ImportError:
+                raise RuntimeError("CephFS unavailable, have you installed libcephfs?")
+
+            filesystem = LibCephFS(rados_inst=cluster)
+            filesystem.init()
+            ret, outbuf, outs = \
+                filesystem.mds_command(mds_spec, cmd, inbuf)
+            filesystem.shutdown()
+        else:
+            raise ArgumentValid("Bad target type '{0}'".format(target[0]))
+
+    except Exception as e:
+        if not isinstance(e, ArgumentError):
+            raise RuntimeError('"{0}": exception {1}'.format(cmd, e))
+        else:
+            raise
+
+    return ret, outbuf, outs
+
+
+def json_command(cluster,
+                 target: Tuple[str, Optional[str]] = ('mon', ''),
+                 prefix: Optional[str] = None,
+                 argdict: Optional[ValidatedArgs] = None,
+                 inbuf: Optional[bytes] = b'',
+                 timeout: Optional[int] = 0,
+                 verbose: Optional[bool] = False) -> Tuple[int, bytes, str]:
+    """
+    Serialize a command into a JSON command and send it with
+    send_command() above.  Prefix may be supplied separately or in
+    argdict.  Any bulk input data comes in inbuf.
+ + If target is osd.N, send command to that osd (except for pgid cmds) + + :param cluster: ``rados.Rados`` instance + :param prefix: String to inject into command arguments as 'prefix' + :param argdict: Command arguments + """ + cmddict: ValidatedArgs = {} + if prefix: + cmddict.update({'prefix': prefix}) + + if argdict: + cmddict.update(argdict) + if 'target' in argdict: + target = cast(Tuple[str, str], argdict['target']) + + try: + if target[0] == 'osd': + osdtarg = CephName() + osdtarget = '{0}.{1}'.format(*target) + # prefer target from cmddict if present and valid + if 'target' in cmddict: + osdtarget = cast(str, cmddict.pop('target')) + try: + osdtarg.valid(osdtarget) + target = ('osd', osdtarg.nameid) + except: + # use the target we were originally given + pass + ret, outbuf, outs = send_command_retry(cluster, + target, json.dumps(cmddict), + inbuf, timeout, verbose) + + except Exception as e: + if not isinstance(e, ArgumentError): + raise RuntimeError('"{0}": exception {1}'.format(argdict, e)) + else: + raise + + return ret, outbuf, outs diff --git a/src/pybind/ceph_daemon.py b/src/pybind/ceph_daemon.py new file mode 100644 index 000000000..0a70e92bc --- /dev/null +++ b/src/pybind/ceph_daemon.py @@ -0,0 +1,431 @@ +# -*- mode:python -*- +# vim: ts=4 sw=4 smarttab expandtab + +""" +Copyright (C) 2015 Red Hat + +This is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License version 2, as published by the Free Software +Foundation. See file COPYING. +""" + +import sys +import json +import socket +import struct +import time +from collections import OrderedDict +from fcntl import ioctl +from fnmatch import fnmatch +from prettytable import PrettyTable, HEADER +from signal import signal, Signals, SIGWINCH +from termios import TIOCGWINSZ +from types import FrameType +from typing import Any, Callable, Dict, List, Optional, Sequence, TextIO, Tuple, Union + +from ceph_argparse import parse_json_funcsigs, validate_command + +COUNTER = 0x8 +LONG_RUNNING_AVG = 0x4 +READ_CHUNK_SIZE = 4096 + + +def admin_socket(asok_path: str, + cmd: List[str], + format: Optional[str] = '') -> bytes: + """ + Send a daemon (--admin-daemon) command 'cmd'. asok_path is the + path to the admin socket; cmd is a list of strings; format may be + set to one of the formatted forms to get output in that form + (daemon commands don't support 'plain' output). + """ + + def do_sockio(path: str, cmd_bytes: bytes) -> bytes: + """ helper: do all the actual low-level stream I/O """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(path) + try: + sock.sendall(cmd_bytes + b'\0') + len_str = sock.recv(4) + if len(len_str) < 4: + raise RuntimeError("no data returned from admin socket") + l, = struct.unpack(">I", len_str) + sock_ret = b'' + + got = 0 + while got < l: + # recv() receives signed int, i.e max 2GB + # workaround by capping READ_CHUNK_SIZE per call. 
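A hedged sketch of how send_command() and json_command() are typically driven; it assumes a reachable cluster, the rados binding, an example conffile path, and an example pool name. send_command() takes a pre-serialized JSON string, while json_command() assembles it from prefix/argdict.

import json
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')  # example path
cluster.connect()

ret, outbuf, outs = send_command(
    cluster, target=('mon', ''),
    cmd=json.dumps({'prefix': 'status', 'format': 'json'}))

ret, outbuf, outs = json_command(
    cluster, target=('mon', ''), prefix='osd pool get',
    argdict={'pool': 'rbd', 'var': 'size', 'format': 'json'})  # example pool
if ret == 0:
    print(outbuf.decode())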
+ want = min(l - got, READ_CHUNK_SIZE) + bit = sock.recv(want) + sock_ret += bit + got += len(bit) + + except Exception as sock_e: + raise RuntimeError('exception: ' + str(sock_e)) + return sock_ret + + try: + cmd_json = do_sockio(asok_path, + b'{"prefix": "get_command_descriptions"}') + except Exception as e: + raise RuntimeError('exception getting command descriptions: ' + str(e)) + + sigdict = parse_json_funcsigs(cmd_json.decode('utf-8'), 'cli') + valid_dict = validate_command(sigdict, cmd) + if not valid_dict: + raise RuntimeError('invalid command') + + if format: + valid_dict['format'] = format + + try: + ret = do_sockio(asok_path, json.dumps(valid_dict).encode('utf-8')) + except Exception as e: + raise RuntimeError('exception: ' + str(e)) + + return ret + + +class Termsize(object): + DEFAULT_SIZE = (25, 80) + + def __init__(self) -> None: + self.rows, self.cols = self._gettermsize() + self.changed = False + + def _gettermsize(self) -> Tuple[int, int]: + try: + fd = sys.stdin.fileno() + sz = struct.pack('hhhh', 0, 0, 0, 0) + rows, cols = struct.unpack('hhhh', ioctl(fd, TIOCGWINSZ, sz))[:2] + return rows, cols + except IOError: + return self.DEFAULT_SIZE + + def update(self) -> None: + rows, cols = self._gettermsize() + if not self.changed: + self.changed = (self.rows, self.cols) != (rows, cols) + self.rows, self.cols = rows, cols + + def reset_changed(self) -> None: + self.changed = False + + def __str__(self) -> str: + return '%s(%dx%d, changed %s)' % (self.__class__, + self.rows, self.cols, self.changed) + + def __repr__(self) -> str: + return '%s(%d,%d,%s)' % (self.__class__, + self.rows, self.cols, self.changed) + + +class DaemonWatcher(object): + """ + Given a Ceph daemon's admin socket path, poll its performance counters + and output a series of output lines showing the momentary values of + counters of interest (those with the 'nick' property in Ceph's schema) + """ + ( + BLACK, + RED, + GREEN, + YELLOW, + BLUE, + MAGENTA, + CYAN, + GRAY + ) = range(8) + + RESET_SEQ = "\033[0m" + COLOR_SEQ = "\033[1;%dm" + COLOR_DARK_SEQ = "\033[0;%dm" + BOLD_SEQ = "\033[1m" + UNDERLINE_SEQ = "\033[4m" + + def __init__(self, + asok: str, + statpats: Optional[Sequence[str]] = None, + min_prio: int = 0) -> None: + self.asok_path = asok + self._colored = False + + self._stats: Optional[Dict[str, dict]] = None + self._schema = None + self._statpats = statpats + self._stats_that_fit: Dict[str, dict] = OrderedDict() + self._min_prio = min_prio + self.termsize = Termsize() + + def supports_color(self, ostr: TextIO) -> bool: + """ + Returns True if the running system's terminal supports color, and False + otherwise. + """ + unsupported_platform = (sys.platform in ('win32', 'Pocket PC')) + # isatty is not always implemented, #6223. + is_a_tty = hasattr(ostr, 'isatty') and ostr.isatty() + if unsupported_platform or not is_a_tty: + return False + return True + + def colorize(self, + msg: str, + color: int, + dark: bool = False) -> str: + """ + Decorate `msg` with escape sequences to give the requested color + """ + return (self.COLOR_DARK_SEQ if dark else self.COLOR_SEQ) % (30 + color) \ + + msg + self.RESET_SEQ + + def bold(self, msg: str) -> str: + """ + Decorate `msg` with escape sequences to make it appear bold + """ + return self.BOLD_SEQ + msg + self.RESET_SEQ + + def format_dimless(self, n: int, width: int) -> str: + """ + Format a number without units, so as to fit into `width` characters, substituting + an appropriate unit suffix. 
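With the two-pass exchange above (fetch the command descriptions, validate, then send the validated command), querying a daemon's admin socket reduces to one helper call. A hedged example; the socket path is illustrative and a live daemon must be listening on it.

out = admin_socket('/var/run/ceph/ceph-osd.0.asok',  # example path
                   ['perf', 'dump'], format='json')
print(out.decode('utf-8'))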
+ """ + units = [' ', 'k', 'M', 'G', 'T', 'P', 'E', 'Z'] + unit = 0 + while len("%s" % (int(n) // (1000**unit))) > width - 1: + if unit >= len(units) - 1: + break + unit += 1 + + if unit > 0: + truncated_float = ("%f" % (n / (1000.0 ** unit)))[0:width - 1] + if truncated_float[-1] == '.': + truncated_float = " " + truncated_float[0:-1] + else: + truncated_float = "%{wid}d".format(wid=width-1) % n + formatted = "%s%s" % (truncated_float, units[unit]) + + if self._colored: + if n == 0: + color = self.BLACK, False + else: + color = self.YELLOW, False + return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \ + + self.bold(self.colorize(formatted[-1], self.YELLOW, False)) + else: + return formatted + + def col_width(self, nick: str) -> int: + """ + Given the short name `nick` for a column, how many characters + of width should the column be allocated? Does not include spacing + between columns. + """ + return max(len(nick), 4) + + def get_stats_that_fit(self) -> Tuple[Dict[str, dict], bool]: + ''' + Get a possibly-truncated list of stats to display based on + current terminal width. Allow breaking mid-section. + ''' + current_fit: Dict[str, dict] = OrderedDict() + if self.termsize.changed or not self._stats_that_fit: + width = 0 + assert self._stats is not None + for section_name, names in self._stats.items(): + for name, stat_data in names.items(): + width += self.col_width(stat_data) + 1 + if width > self.termsize.cols: + break + if section_name not in current_fit: + current_fit[section_name] = OrderedDict() + current_fit[section_name][name] = stat_data + if width > self.termsize.cols: + break + + self.termsize.reset_changed() + changed = bool(current_fit) and (current_fit != self._stats_that_fit) + if changed: + self._stats_that_fit = current_fit + return self._stats_that_fit, changed + + def _print_headers(self, ostr: TextIO) -> None: + """ + Print a header row to `ostr` + """ + header = "" + stats, _ = self.get_stats_that_fit() + for section_name, names in stats.items(): + section_width = \ + sum([self.col_width(x) + 1 for x in names.values()]) - 1 + pad = max(section_width - len(section_name), 0) + pad_prefix = pad // 2 + header += (pad_prefix * '-') + header += (section_name[0:section_width]) + header += ((pad - pad_prefix) * '-') + header += ' ' + header += "\n" + ostr.write(self.colorize(header, self.BLUE, True)) + + sub_header = "" + for section_name, names in stats.items(): + for stat_name, stat_nick in names.items(): + sub_header += self.UNDERLINE_SEQ \ + + self.colorize( + stat_nick.ljust(self.col_width(stat_nick)), + self.BLUE) \ + + ' ' + sub_header = sub_header[0:-1] + self.colorize('|', self.BLUE) + sub_header += "\n" + ostr.write(sub_header) + + def _print_vals(self, + ostr: TextIO, + dump: Dict[str, Any], + last_dump: Dict[str, Any]) -> None: + """ + Print a single row of values to `ostr`, based on deltas between `dump` and + `last_dump`. 
+ """ + val_row = "" + fit, changed = self.get_stats_that_fit() + if changed: + self._print_headers(ostr) + for section_name, names in fit.items(): + for stat_name, stat_nick in names.items(): + assert self._schema is not None + stat_type = self._schema[section_name][stat_name]['type'] + if bool(stat_type & COUNTER): + n = max(dump[section_name][stat_name] - + last_dump[section_name][stat_name], 0) + elif bool(stat_type & LONG_RUNNING_AVG): + entries = dump[section_name][stat_name]['avgcount'] - \ + last_dump[section_name][stat_name]['avgcount'] + if entries: + n = (dump[section_name][stat_name]['sum'] - + last_dump[section_name][stat_name]['sum']) \ + / float(entries) + n *= 1000.0 # Present in milliseconds + else: + n = 0 + else: + n = dump[section_name][stat_name] + + val_row += self.format_dimless(int(n), + self.col_width(stat_nick)) + val_row += " " + val_row = val_row[0:-1] + val_row += self.colorize("|", self.BLUE) + val_row = val_row[0:-len(self.colorize("|", self.BLUE))] + ostr.write("{0}\n".format(val_row)) + + def _should_include(self, sect: str, name: str, prio: int) -> bool: + ''' + boolean: should we output this stat? + + 1) If self._statpats exists and the name filename-glob-matches + anything in the list, and prio is high enough, or + 2) If self._statpats doesn't exist and prio is high enough + + then yes. + ''' + if self._statpats: + sectname = '.'.join((sect, name)) + if not any([ + p for p in self._statpats + if fnmatch(name, p) or fnmatch(sectname, p) + ]): + return False + + if self._min_prio is not None and prio is not None: + return (prio >= self._min_prio) + + return True + + def _load_schema(self) -> None: + """ + Populate our instance-local copy of the daemon's performance counter + schema, and work out which stats we will display. + """ + self._schema = json.loads( + admin_socket(self.asok_path, ["perf", "schema"]).decode('utf-8'), + object_pairs_hook=OrderedDict) + + # Build list of which stats we will display + self._stats = OrderedDict() + assert self._schema is not None + for section_name, section_stats in self._schema.items(): + for name, schema_data in section_stats.items(): + prio = schema_data.get('priority', 0) + if self._should_include(section_name, name, prio): + if section_name not in self._stats: + self._stats[section_name] = OrderedDict() + self._stats[section_name][name] = schema_data['nick'] + if not len(self._stats): + raise RuntimeError("no stats selected by filters") + + def _handle_sigwinch(self, + signo: Union[int, Signals], + frame: Optional[FrameType]) -> None: + self.termsize.update() + + def run(self, + interval: int, + count: Optional[int] = None, + ostr: TextIO = sys.stdout) -> None: + """ + Print output at regular intervals until interrupted. 
+ + :param ostr: Stream to which to send output + """ + + self._load_schema() + self._colored = self.supports_color(ostr) + + self._print_headers(ostr) + + last_dump = json.loads(admin_socket(self.asok_path, ["perf", "dump"]).decode('utf-8')) + rows_since_header = 0 + + try: + signal(SIGWINCH, self._handle_sigwinch) + while True: + dump = json.loads(admin_socket(self.asok_path, ["perf", "dump"]).decode('utf-8')) + if rows_since_header >= self.termsize.rows - 2: + self._print_headers(ostr) + rows_since_header = 0 + self._print_vals(ostr, dump, last_dump) + if count is not None: + count -= 1 + if count <= 0: + break + rows_since_header += 1 + last_dump = dump + + # time.sleep() is interrupted by SIGWINCH; avoid that + end = time.time() + interval + while time.time() < end: + time.sleep(end - time.time()) + + except KeyboardInterrupt: + return + + def list(self, ostr: TextIO = sys.stdout) -> None: + """ + Show all selected stats with section, full name, nick, and prio + """ + table = PrettyTable(('section', 'name', 'nick', 'prio')) + table.align['section'] = 'l' + table.align['name'] = 'l' + table.align['nick'] = 'l' + table.align['prio'] = 'r' + self._load_schema() + assert self._stats is not None + assert self._schema is not None + for section_name, section_stats in self._stats.items(): + for name, nick in section_stats.items(): + prio = self._schema[section_name][name].get('priority') or 0 + table.add_row((section_name, name, nick, prio)) + ostr.write(table.get_string(hrules=HEADER) + '\n') diff --git a/src/pybind/ceph_mgr_repl.py b/src/pybind/ceph_mgr_repl.py new file mode 100755 index 000000000..159abdc99 --- /dev/null +++ b/src/pybind/ceph_mgr_repl.py @@ -0,0 +1,132 @@ +#!/usr/bin/python3 +# -*- mode:python -*- +# vim: ts=4 sw=4 smarttab expandtab + +__all__ = ['ConsoleOptions', 'MgrModuleInterpreter'] + +import readline +import sys +from code import InteractiveConsole +from collections import namedtuple +from pathlib import Path + +from ceph_argparse import json_command + + +ConsoleOptions = namedtuple('ConsoleOptions', + ['name', 'conffile', 'prefix', 'timeout']) + + +class MgrModuleInteractiveConsole(InteractiveConsole): + def __init__(self, rados, opt, filename=""): + super().__init__(filename) + self.cmd_prefix = opt.prefix + self.timeout = opt.timeout + self.cluster = rados.Rados(name=opt.name, + conffile=opt.conffile) + self.cluster.connect(timeout=opt.timeout) + + def _do_runsource(self, source): + ret, buf, s = json_command(self.cluster, + prefix=self.cmd_prefix, + target=('mon-mgr',), + inbuf=source.encode(), + timeout=self.timeout) + if ret == 0: + # TODO: better way to encode the outputs + sys.stdout.write(buf.decode()) + sys.stderr.write(s) + else: + # needs more + self.write("the input is not complete") + + def runsource(self, source, filename='', symbol='single'): + try: + # just validate the syntax + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self._do_runsource(source) + return False + + def runcode(self, code): + # code object cannot be pickled + raise NotImplementedError() + + +def show_env(): + prog = Path(__file__).resolve() + ceph_dir = prog.parents[2] + python_path = ':'.join([f'{ceph_dir}/src/pybind', + f'{ceph_dir}/build/lib/cython_modules/lib.3', + f'{ceph_dir}/src/python-common', + '$PYTHONPATH']) + ld_library_path = ':'.join([f'{ceph_dir}/build/lib', + '$LD_LIBRARY_PATH']) + return f''' + 
$ export PYTHONPATH={python_path} + $ export LD_LIBRARY_PATH={ld_library_path}'''.strip('\n') + + +def main(): + import argparse + try: + import rados + except ImportError: + print(f'''Unable to import rados python binding. +Please set the environment variables first: +{show_env()}''', + file=sys.stderr) + exit(1) + + prog = Path(__file__).name + epilog = f'''Usage: + {prog} -c "print(mgr.release_name)"''' + parser = argparse.ArgumentParser(epilog=epilog) + parser.add_argument('--name', action='store', + default='client.admin', + help='user name for connecting to cluster') + parser.add_argument('--conffile', action='store', + default=rados.Rados.DEFAULT_CONF_FILES, + help='path to ceph.conf') + parser.add_argument('--prefix', action='store', + default='mgr self-test eval', + help='command prefix for eval the source') + parser.add_argument('--timeout', action='store', + type=int, + default=10, + help='timeout in seconds') + parser.add_argument('--show-env', action='store_true', + help='show instructions to set environment variables') + group = parser.add_mutually_exclusive_group() + group.add_argument('-c', action='store', + help='optional statement', + dest='command') + group.add_argument('script', nargs='?', type=argparse.FileType('r')) + args = parser.parse_args() + options = ConsoleOptions(name=args.name, + conffile=args.conffile, + prefix=args.prefix, + timeout=args.timeout) + console = MgrModuleInteractiveConsole(rados, options) + if args.show_env: + print(show_env()) + elif args.command: + console.runsource(args.command) + elif args.script: + console.runsource(args.script.read()) + else: + sys.ps1 = f'[{args.prefix}] >>> ' + console.interact() + + +if __name__ == '__main__': + main() diff --git a/src/pybind/cephfs/CMakeLists.txt b/src/pybind/cephfs/CMakeLists.txt new file mode 100644 index 000000000..bd1ed3d55 --- /dev/null +++ b/src/pybind/cephfs/CMakeLists.txt @@ -0,0 +1,5 @@ +distutils_add_cython_module(cython_cephfs + cephfs + ${CMAKE_CURRENT_SOURCE_DIR}/cephfs.pyx) +add_dependencies(cython_cephfs cephfs) +distutils_install_cython_module(cython_cephfs) diff --git a/src/pybind/cephfs/MANIFEST.in b/src/pybind/cephfs/MANIFEST.in new file mode 100644 index 000000000..abd9275e2 --- /dev/null +++ b/src/pybind/cephfs/MANIFEST.in @@ -0,0 +1 @@ +include cephfs.pyx diff --git a/src/pybind/cephfs/c_cephfs.pxd b/src/pybind/cephfs/c_cephfs.pxd new file mode 100644 index 000000000..69d24912b --- /dev/null +++ b/src/pybind/cephfs/c_cephfs.pxd @@ -0,0 +1,163 @@ +from libc.stdint cimport * +from types cimport * + +cdef extern from "cephfs/ceph_ll_client.h": + cdef struct statx "ceph_statx": + uint32_t stx_mask + uint32_t stx_blksize + uint32_t stx_nlink + uint32_t stx_uid + uint32_t stx_gid + uint16_t stx_mode + uint64_t stx_ino + uint64_t stx_size + uint64_t stx_blocks + uint64_t stx_dev + uint64_t stx_rdev + timespec stx_atime + timespec stx_ctime + timespec stx_mtime + timespec stx_btime + uint64_t stx_version + +cdef extern from "cephfs/libcephfs.h" nogil: + cdef struct ceph_mount_info: + pass + + cdef struct ceph_dir_result: + pass + + cdef struct snap_metadata: + const char *key + const char *value + + cdef struct snap_info: + uint64_t id + size_t nr_snap_metadata + snap_metadata *snap_metadata + + cdef struct ceph_snapdiff_info: + pass + + cdef struct ceph_snapdiff_entry_t: + dirent dir_entry + uint64_t snapid + + ctypedef void* rados_t + + const char *ceph_version(int *major, int *minor, int *patch) + + int ceph_create(ceph_mount_info **cmount, const char * const id) + int 
ceph_create_from_rados(ceph_mount_info **cmount, rados_t cluster) + int ceph_init(ceph_mount_info *cmount) + void ceph_shutdown(ceph_mount_info *cmount) + + int ceph_getaddrs(ceph_mount_info* cmount, char** addrs) + int64_t ceph_get_fs_cid(ceph_mount_info *cmount) + int ceph_conf_read_file(ceph_mount_info *cmount, const char *path_list) + int ceph_conf_parse_argv(ceph_mount_info *cmount, int argc, const char **argv) + int ceph_conf_get(ceph_mount_info *cmount, const char *option, char *buf, size_t len) + int ceph_conf_set(ceph_mount_info *cmount, const char *option, const char *value) + int ceph_set_mount_timeout(ceph_mount_info *cmount, uint32_t timeout) + + int ceph_mount(ceph_mount_info *cmount, const char *root) + int ceph_select_filesystem(ceph_mount_info *cmount, const char *fs_name) + int ceph_unmount(ceph_mount_info *cmount) + int ceph_abort_conn(ceph_mount_info *cmount) + uint64_t ceph_get_instance_id(ceph_mount_info *cmount) + int ceph_fstatx(ceph_mount_info *cmount, int fd, statx *stx, unsigned want, unsigned flags) + int ceph_statx(ceph_mount_info *cmount, const char *path, statx *stx, unsigned want, unsigned flags) + int ceph_statfs(ceph_mount_info *cmount, const char *path, statvfs *stbuf) + + int ceph_setattrx(ceph_mount_info *cmount, const char *relpath, statx *stx, int mask, int flags) + int ceph_fsetattrx(ceph_mount_info *cmount, int fd, statx *stx, int mask) + int ceph_mds_command(ceph_mount_info *cmount, const char *mds_spec, const char **cmd, size_t cmdlen, + const char *inbuf, size_t inbuflen, char **outbuf, size_t *outbuflen, + char **outs, size_t *outslen) + int ceph_rename(ceph_mount_info *cmount, const char *from_, const char *to) + int ceph_link(ceph_mount_info *cmount, const char *existing, const char *newname) + int ceph_unlink(ceph_mount_info *cmount, const char *path) + int ceph_symlink(ceph_mount_info *cmount, const char *existing, const char *newname) + int ceph_readlink(ceph_mount_info *cmount, const char *path, char *buf, int64_t size) + int ceph_setxattr(ceph_mount_info *cmount, const char *path, const char *name, + const void *value, size_t size, int flags) + int ceph_fsetxattr(ceph_mount_info *cmount, int fd, const char *name, + const void *value, size_t size, int flags) + int ceph_lsetxattr(ceph_mount_info *cmount, const char *path, const char *name, + const void *value, size_t size, int flags) + int ceph_getxattr(ceph_mount_info *cmount, const char *path, const char *name, + void *value, size_t size) + int ceph_fgetxattr(ceph_mount_info *cmount, int fd, const char *name, + void *value, size_t size) + int ceph_lgetxattr(ceph_mount_info *cmount, const char *path, const char *name, + void *value, size_t size) + int ceph_removexattr(ceph_mount_info *cmount, const char *path, const char *name) + int ceph_fremovexattr(ceph_mount_info *cmount, int fd, const char *name) + int ceph_lremovexattr(ceph_mount_info *cmount, const char *path, const char *name) + int ceph_listxattr(ceph_mount_info *cmount, const char *path, char *list, size_t size) + int ceph_flistxattr(ceph_mount_info *cmount, int fd, char *list, size_t size) + int ceph_llistxattr(ceph_mount_info *cmount, const char *path, char *list, size_t size) + int ceph_write(ceph_mount_info *cmount, int fd, const char *buf, int64_t size, int64_t offset) + int ceph_pwritev(ceph_mount_info *cmount, int fd, iovec *iov, int iovcnt, int64_t offset) + int ceph_read(ceph_mount_info *cmount, int fd, char *buf, int64_t size, int64_t offset) + int ceph_preadv(ceph_mount_info *cmount, int fd, iovec *iov, int iovcnt, 
int64_t offset) + int ceph_flock(ceph_mount_info *cmount, int fd, int operation, uint64_t owner) + int ceph_mknod(ceph_mount_info *cmount, const char *path, mode_t mode, dev_t rdev) + int ceph_close(ceph_mount_info *cmount, int fd) + int ceph_open(ceph_mount_info *cmount, const char *path, int flags, mode_t mode) + int ceph_mkdir(ceph_mount_info *cmount, const char *path, mode_t mode) + int ceph_mksnap(ceph_mount_info *cmount, const char *path, const char *name, mode_t mode, snap_metadata *snap_metadata, size_t nr_snap_metadata) + int ceph_rmsnap(ceph_mount_info *cmount, const char *path, const char *name) + int ceph_get_snap_info(ceph_mount_info *cmount, const char *path, snap_info *snap_info) + void ceph_free_snap_info_buffer(snap_info *snap_info) + int ceph_mkdirs(ceph_mount_info *cmount, const char *path, mode_t mode) + int ceph_closedir(ceph_mount_info *cmount, ceph_dir_result *dirp) + int ceph_opendir(ceph_mount_info *cmount, const char *name, ceph_dir_result **dirpp) + void ceph_rewinddir(ceph_mount_info *cmount, ceph_dir_result *dirp) + int64_t ceph_telldir(ceph_mount_info *cmount, ceph_dir_result *dirp) + void ceph_seekdir(ceph_mount_info *cmount, ceph_dir_result *dirp, int64_t offset) + int ceph_chdir(ceph_mount_info *cmount, const char *path) + dirent * ceph_readdir(ceph_mount_info *cmount, ceph_dir_result *dirp) + int ceph_open_snapdiff(ceph_mount_info *cmount, + const char *root_path, + const char *rel_path, + const char *snap1, + const char *snap2, + ceph_snapdiff_info *out) + int ceph_readdir_snapdiff(ceph_snapdiff_info *snapdiff, ceph_snapdiff_entry_t *out); + int ceph_close_snapdiff(ceph_snapdiff_info *snapdiff) + int ceph_rmdir(ceph_mount_info *cmount, const char *path) + const char* ceph_getcwd(ceph_mount_info *cmount) + int ceph_sync_fs(ceph_mount_info *cmount) + int ceph_fsync(ceph_mount_info *cmount, int fd, int syncdataonly) + int ceph_lazyio(ceph_mount_info *cmount, int fd, int enable) + int ceph_lazyio_propagate(ceph_mount_info *cmount, int fd, int64_t offset, size_t count) + int ceph_lazyio_synchronize(ceph_mount_info *cmount, int fd, int64_t offset, size_t count) + int ceph_fallocate(ceph_mount_info *cmount, int fd, int mode, int64_t offset, int64_t length) + int ceph_chmod(ceph_mount_info *cmount, const char *path, mode_t mode) + int ceph_lchmod(ceph_mount_info *cmount, const char *path, mode_t mode) + int ceph_fchmod(ceph_mount_info *cmount, int fd, mode_t mode) + int ceph_chown(ceph_mount_info *cmount, const char *path, int uid, int gid) + int ceph_lchown(ceph_mount_info *cmount, const char *path, int uid, int gid) + int ceph_fchown(ceph_mount_info *cmount, int fd, int uid, int gid) + int64_t ceph_lseek(ceph_mount_info *cmount, int fd, int64_t offset, int whence) + void ceph_buffer_free(char *buf) + mode_t ceph_umask(ceph_mount_info *cmount, mode_t mode) + int ceph_utime(ceph_mount_info *cmount, const char *path, utimbuf *buf) + int ceph_futime(ceph_mount_info *cmount, int fd, utimbuf *buf) + int ceph_utimes(ceph_mount_info *cmount, const char *path, timeval times[2]) + int ceph_lutimes(ceph_mount_info *cmount, const char *path, timeval times[2]) + int ceph_futimes(ceph_mount_info *cmount, int fd, timeval times[2]) + int ceph_futimens(ceph_mount_info *cmount, int fd, timespec times[2]) + int ceph_get_file_replication(ceph_mount_info *cmount, int fh) + int ceph_get_path_replication(ceph_mount_info *cmount, const char *path) + int ceph_get_pool_id(ceph_mount_info *cmount, const char *pool_name) + int ceph_get_pool_replication(ceph_mount_info *cmount, int 
pool_id) + int ceph_debug_get_fd_caps(ceph_mount_info *cmount, int fd) + int ceph_debug_get_file_caps(ceph_mount_info *cmount, const char *path) + uint32_t ceph_get_cap_return_timeout(ceph_mount_info *cmount) + void ceph_set_uuid(ceph_mount_info *cmount, const char *uuid) + void ceph_set_session_timeout(ceph_mount_info *cmount, unsigned timeout) + int ceph_get_file_layout(ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool) + int ceph_get_file_pool_name(ceph_mount_info *cmount, int fh, char *buf, size_t buflen) + int ceph_get_default_data_pool_name(ceph_mount_info *cmount, char *buf, size_t buflen) diff --git a/src/pybind/cephfs/cephfs.pyx b/src/pybind/cephfs/cephfs.pyx new file mode 100644 index 000000000..793d88b98 --- /dev/null +++ b/src/pybind/cephfs/cephfs.pyx @@ -0,0 +1,2777 @@ +""" +This module is a thin wrapper around libcephfs. +""" + +from cpython cimport PyObject, ref, exc +from libc.stdint cimport * +from libc.stdlib cimport malloc, realloc, free + +from types cimport * +IF BUILD_DOC: + include "mock_cephfs.pxi" + cdef class Rados: + cdef: + rados_t cluster +ELSE: + from c_cephfs cimport * + from rados cimport Rados + +from collections import namedtuple +from datetime import datetime +import os +import time +from typing import Any, Dict, Optional + +AT_SYMLINK_NOFOLLOW = 0x0100 +AT_STATX_SYNC_TYPE = 0x6000 +AT_STATX_SYNC_AS_STAT = 0x0000 +AT_STATX_FORCE_SYNC = 0x2000 +AT_STATX_DONT_SYNC = 0x4000 +cdef int AT_SYMLINK_NOFOLLOW_CDEF = AT_SYMLINK_NOFOLLOW +CEPH_STATX_BASIC_STATS = 0x7ff +cdef int CEPH_STATX_BASIC_STATS_CDEF = CEPH_STATX_BASIC_STATS +CEPH_STATX_MODE = 0x1 +CEPH_STATX_NLINK = 0x2 +CEPH_STATX_UID = 0x4 +CEPH_STATX_GID = 0x8 +CEPH_STATX_RDEV = 0x10 +CEPH_STATX_ATIME = 0x20 +CEPH_STATX_MTIME = 0x40 +CEPH_STATX_CTIME = 0x80 +CEPH_STATX_INO = 0x100 +CEPH_STATX_SIZE = 0x200 +CEPH_STATX_BLOCKS = 0x400 +CEPH_STATX_BTIME = 0x800 +CEPH_STATX_VERSION = 0x1000 + +FALLOC_FL_KEEP_SIZE = 0x01 +FALLOC_FL_PUNCH_HOLE = 0x02 +FALLOC_FL_NO_HIDE_STALE = 0x04 + +CEPH_SETATTR_MODE = 0x1 +CEPH_SETATTR_UID = 0x2 +CEPH_SETATTR_GID = 0x4 +CEPH_SETATTR_MTIME = 0x8 +CEPH_SETATTR_ATIME = 0x10 +CEPH_SETATTR_SIZE = 0x20 +CEPH_SETATTR_CTIME = 0x40 +CEPH_SETATTR_BTIME = 0x200 + +CEPH_NOSNAP = -2 + +# errno definitions +cdef enum: + CEPHFS_EBLOCKLISTED = 108 + CEPHFS_EPERM = 1 + CEPHFS_ESTALE = 116 + CEPHFS_ENOSPC = 28 + CEPHFS_ETIMEDOUT = 110 + CEPHFS_EIO = 5 + CEPHFS_ENOTCONN = 107 + CEPHFS_EEXIST = 17 + CEPHFS_EINTR = 4 + CEPHFS_EINVAL = 22 + CEPHFS_EBADF = 9 + CEPHFS_EROFS = 30 + CEPHFS_EAGAIN = 11 + CEPHFS_EACCES = 13 + CEPHFS_ELOOP = 40 + CEPHFS_EISDIR = 21 + CEPHFS_ENOENT = 2 + CEPHFS_ENOTDIR = 20 + CEPHFS_ENAMETOOLONG = 36 + CEPHFS_EBUSY = 16 + CEPHFS_EDQUOT = 122 + CEPHFS_EFBIG = 27 + CEPHFS_ERANGE = 34 + CEPHFS_ENXIO = 6 + CEPHFS_ECANCELED = 125 + CEPHFS_ENODATA = 61 + CEPHFS_EOPNOTSUPP = 95 + CEPHFS_EXDEV = 18 + CEPHFS_ENOMEM = 12 + CEPHFS_ENOTRECOVERABLE = 131 + CEPHFS_ENOSYS = 38 + CEPHFS_EWOULDBLOCK = CEPHFS_EAGAIN + CEPHFS_ENOTEMPTY = 39 + CEPHFS_EDEADLK = 35 + CEPHFS_EDEADLOCK = CEPHFS_EDEADLK + CEPHFS_EDOM = 33 + CEPHFS_EMLINK = 31 + CEPHFS_ETIME = 62 + CEPHFS_EOLDSNAPC = 85 + +cdef extern from "Python.h": + # These are in cpython/string.pxd, but use "object" types instead of + # PyObject*, which invokes assumptions in cpython that we need to + # legitimately break to implement zero-copy string buffers in Image.read(). + # This is valid use of the Python API and documented as a special case. 
+ PyObject *PyBytes_FromStringAndSize(char *v, Py_ssize_t len) except NULL + char* PyBytes_AsString(PyObject *string) except NULL + int _PyBytes_Resize(PyObject **string, Py_ssize_t newsize) except -1 + void PyEval_InitThreads() + + +class Error(Exception): + def get_error_code(self): + return 1 + + +class LibCephFSStateError(Error): + pass + + +class OSError(Error): + def __init__(self, errno, strerror): + super(OSError, self).__init__(errno, strerror) + self.errno = errno + self.strerror = "%s: %s" % (strerror, os.strerror(errno)) + + def __str__(self): + return '{} [Errno {}]'.format(self.strerror, self.errno) + + def get_error_code(self): + return self.errno + + +class PermissionError(OSError): + pass + + +class ObjectNotFound(OSError): + pass + + +class NoData(OSError): + pass + + +class ObjectExists(OSError): + pass + + +class IOError(OSError): + pass + + +class NoSpace(OSError): + pass + + +class InvalidValue(OSError): + pass + + +class OperationNotSupported(OSError): + pass + + +class WouldBlock(OSError): + pass + + +class OutOfRange(OSError): + pass + + +class ObjectNotEmpty(OSError): + pass + +class NotDirectory(OSError): + pass + +class DiskQuotaExceeded(OSError): + pass +class PermissionDenied(OSError): + pass + +cdef errno_to_exception = { + CEPHFS_EPERM : PermissionError, + CEPHFS_ENOENT : ObjectNotFound, + CEPHFS_EIO : IOError, + CEPHFS_ENOSPC : NoSpace, + CEPHFS_EEXIST : ObjectExists, + CEPHFS_ENODATA : NoData, + CEPHFS_EINVAL : InvalidValue, + CEPHFS_EOPNOTSUPP : OperationNotSupported, + CEPHFS_ERANGE : OutOfRange, + CEPHFS_EWOULDBLOCK: WouldBlock, + CEPHFS_ENOTEMPTY : ObjectNotEmpty, + CEPHFS_ENOTDIR : NotDirectory, + CEPHFS_EDQUOT : DiskQuotaExceeded, + CEPHFS_EACCES : PermissionDenied, +} + + +cdef make_ex(ret, msg): + """ + Translate a libcephfs return code into an exception. + + :param ret: the return code + :type ret: int + :param msg: the error message to use + :type msg: str + :returns: a subclass of :class:`Error` + """ + ret = abs(ret) + if ret in errno_to_exception: + return errno_to_exception[ret](ret, msg) + else: + return OSError(ret, msg) + + +class DirEntry(namedtuple('DirEntry', + ['d_ino', 'd_off', 'd_reclen', 'd_type', 'd_name', 'd_snapid'])): + DT_DIR = 0x4 + DT_REG = 0x8 + DT_LNK = 0xA + def is_dir(self): + return self.d_type == self.DT_DIR + + def is_symbol_file(self): + return self.d_type == self.DT_LNK + + def is_file(self): + return self.d_type == self.DT_REG + +StatResult = namedtuple('StatResult', + ["st_dev", "st_ino", "st_mode", "st_nlink", "st_uid", + "st_gid", "st_rdev", "st_size", "st_blksize", + "st_blocks", "st_atime", "st_mtime", "st_ctime"]) + +cdef class DirResult(object): + cdef LibCephFS lib + cdef ceph_dir_result* handle + +# Bug in older Cython instances prevents this from being a static method. 
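How make_ex() picks an exception class, sketched as it would behave inside this module (make_ex and the CEPHFS_* errnos are cdef-level names, so they are not importable from Python; the calls below are illustration only).

e = make_ex(-CEPHFS_ENOENT, "lookup failed")
print(type(e).__name__)   # ObjectNotFound: errno 2 is in the table
e = make_ex(-CEPHFS_ESTALE, "stale handle")
print(type(e).__name__)   # OSError: 116 has no dedicated subclass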
+# @staticmethod +# cdef create(LibCephFS lib, ceph_dir_result* handle): +# d = DirResult() +# d.lib = lib +# d.handle = handle +# return d + + def __dealloc__(self): + self.close() + + def __enter__(self): + if not self.handle: + raise make_ex(CEPHFS_EBADF, "dir is not open") + self.lib.require_state("mounted") + with nogil: + ceph_rewinddir(self.lib.cluster, self.handle) + return self + + def __exit__(self, type_, value, traceback): + self.close() + return False + + def readdir(self): + self.lib.require_state("mounted") + + with nogil: + dirent = ceph_readdir(self.lib.cluster, self.handle) + if not dirent: + return None + + IF UNAME_SYSNAME == "FreeBSD" or UNAME_SYSNAME == "Darwin": + return DirEntry(d_ino=dirent.d_ino, + d_off=0, + d_reclen=dirent.d_reclen, + d_type=dirent.d_type, + d_name=dirent.d_name, + d_snapid=CEPH_NOSNAP) + ELSE: + return DirEntry(d_ino=dirent.d_ino, + d_off=dirent.d_off, + d_reclen=dirent.d_reclen, + d_type=dirent.d_type, + d_name=dirent.d_name, + d_snapid=CEPH_NOSNAP) + + def close(self): + if self.handle: + self.lib.require_state("mounted") + with nogil: + ret = ceph_closedir(self.lib.cluster, self.handle) + if ret < 0: + raise make_ex(ret, "closedir failed") + self.handle = NULL + + def rewinddir(self): + if not self.handle: + raise make_ex(CEPHFS_EBADF, "dir is not open") + self.lib.require_state("mounted") + with nogil: + ceph_rewinddir(self.lib.cluster, self.handle) + + def telldir(self): + if not self.handle: + raise make_ex(CEPHFS_EBADF, "dir is not open") + self.lib.require_state("mounted") + with nogil: + ret = ceph_telldir(self.lib.cluster, self.handle) + if ret < 0: + raise make_ex(ret, "telldir failed") + return ret + + def seekdir(self, offset): + if not self.handle: + raise make_ex(CEPHFS_EBADF, "dir is not open") + if not isinstance(offset, int): + raise TypeError('offset must be an int') + self.lib.require_state("mounted") + cdef int64_t _offset = offset + with nogil: + ceph_seekdir(self.lib.cluster, self.handle, _offset) + +cdef class SnapDiffHandle(object): + cdef LibCephFS lib + cdef ceph_snapdiff_info handle + cdef int opened + + def __cinit__(self, _lib): + self.opened = 0 + self.lib = _lib + + def __dealloc__(self): + self.close() + + def readdir(self): + self.lib.require_state("mounted") + + cdef: + ceph_snapdiff_entry_t difent + with nogil: + ret = ceph_readdir_snapdiff(&self.handle, &difent) + if ret < 0: + raise make_ex(ret, "ceph_readdir_snapdiff failed, ret {}" + .format(ret)) + if ret == 0: + return None + + IF UNAME_SYSNAME == "FreeBSD" or UNAME_SYSNAME == "Darwin": + return DirEntry(d_ino=difent.dir_entry.d_ino, + d_off=0, + d_reclen=difent.dir_entry.d_reclen, + d_type=difent.dir_entry.d_type, + d_name=difent.dir_entry.d_name, + d_snapid=difent.snapid) + ELSE: + return DirEntry(d_ino=difent.dir_entry.d_ino, + d_off=difent.dir_entry.d_off, + d_reclen=difent.dir_entry.d_reclen, + d_type=difent.dir_entry.d_type, + d_name=difent.dir_entry.d_name, + d_snapid=difent.snapid) + + def close(self): + if (not self.opened): + return + self.lib.require_state("mounted") + with nogil: + ret = ceph_close_snapdiff(&self.handle) + if ret < 0: + raise make_ex(ret, "closesnapdiff failed") + self.opened = 0 + + +def cstr(val, name, encoding="utf-8", opt=False) -> bytes: + """ + Create a byte string from a Python string + + :param basestring val: Python string + :param str name: Name of the string parameter, for exceptions + :param str encoding: Encoding to use + :param bool opt: If True, None is allowed + :raises: :class:`InvalidArgument` + """ + if 
opt and val is None: + return None + if isinstance(val, bytes): + return val + else: + try: + v = val.encode(encoding) + except: + raise TypeError('%s must be encodeable as a bytearray' % name) + assert isinstance(v, bytes) + return v + +def cstr_list(list_str, name, encoding="utf-8"): + return [cstr(s, name) for s in list_str] + + +def decode_cstr(val, encoding="utf-8") -> Optional[str]: + """ + Decode a byte string into a Python string. + + :param bytes val: byte string + """ + if val is None: + return None + + return val.decode(encoding) + +cdef timeval to_timeval(t): + """ + return timeval equivalent from time + """ + tt = int(t) + cdef timeval buf = timeval(tt, (t - tt) * 1000000) + return buf + +cdef timespec to_timespec(t): + """ + return timespec equivalent from time + """ + tt = int(t) + cdef timespec buf = timespec(tt, (t - tt) * 1000000000) + return buf + +cdef char* opt_str(s) except? NULL: + if s is None: + return NULL + return s + + +cdef char ** to_bytes_array(list_bytes): + cdef char **ret = malloc(len(list_bytes) * sizeof(char *)) + if ret == NULL: + raise MemoryError("malloc failed") + for i in range(len(list_bytes)): + ret[i] = list_bytes[i] + return ret + + +cdef void* realloc_chk(void* ptr, size_t size) except NULL: + cdef void *ret = realloc(ptr, size) + if ret == NULL: + raise MemoryError("realloc failed") + return ret + + +cdef iovec * to_iovec(buffers) except NULL: + cdef iovec *iov = malloc(len(buffers) * sizeof(iovec)) + cdef char *s = NULL + if iov == NULL: + raise MemoryError("malloc failed") + for i in xrange(len(buffers)): + s = buffers[i] + iov[i] = [s, len(buffers[i])] + return iov + + +cdef class LibCephFS(object): + """libcephfs python wrapper""" + + cdef public object state + cdef ceph_mount_info *cluster + + def require_state(self, *args): + if self.state in args: + return + raise LibCephFSStateError("You cannot perform that operation on a " + "CephFS object in state %s." % (self.state)) + + def __cinit__(self, conf=None, conffile=None, auth_id=None, rados_inst=None): + """Create a libcephfs wrapper + + :param conf dict opt: settings overriding the default ones and conffile + :param conffile str opt: the path to ceph.conf to override the default settings + :auth_id str opt: the id used to authenticate the client entity + :rados_inst Rados opt: a rados.Rados instance + """ + PyEval_InitThreads() + self.state = "uninitialized" + if rados_inst is not None: + if auth_id is not None or conffile is not None or conf is not None: + raise make_ex(CEPHFS_EINVAL, + "May not pass RADOS instance as well as other configuration") + + self.create_with_rados(rados_inst) + else: + self.create(conf, conffile, auth_id) + + def create_with_rados(self, Rados rados_inst): + cdef int ret + with nogil: + ret = ceph_create_from_rados(&self.cluster, rados_inst.cluster) + if ret != 0: + raise Error("libcephfs_initialize failed with error code: %d" % ret) + self.state = "configuring" + + NO_CONF_FILE = -1 + "special value that indicates no conffile should be read when creating a mount handle" + DEFAULT_CONF_FILES = -2 + "special value that indicates the default conffiles should be read when creating a mount handle" + + def create(self, conf=None, conffile=NO_CONF_FILE, auth_id=None): + """ + Create a mount handle for interacting with Ceph. All libcephfs + functions operate on a mount info handle. 
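A small illustration of the byte-string helpers above: cstr() normalizes str or bytes arguments before they cross into C, opt=True lets None through, and decode_cstr() is its inverse.

print(cstr('pool', 'name'))           # b'pool'
print(cstr(b'pool', 'name'))          # b'pool' (already bytes)
print(cstr(None, 'name', opt=True))   # None
print(decode_cstr(b'pool'))           # 'pool'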
+ + :param conf dict opt: settings overriding the default ones and conffile + :param conffile Union[int,str], optional: the path to ceph.conf to override the default settings + :auth_id str opt: the id used to authenticate the client entity + """ + if conf is not None and not isinstance(conf, dict): + raise TypeError("conf must be dict or None") + cstr(conffile, 'configfile', opt=True) + auth_id = cstr(auth_id, 'auth_id', opt=True) + + cdef: + char* _auth_id = opt_str(auth_id) + int ret + + with nogil: + ret = ceph_create(&self.cluster, _auth_id) + if ret != 0: + raise Error("libcephfs_initialize failed with error code: %d" % ret) + + self.state = "configuring" + if conffile in (self.NO_CONF_FILE, None): + pass + elif conffile in (self.DEFAULT_CONF_FILES, ''): + self.conf_read_file(None) + else: + self.conf_read_file(conffile) + if conf is not None: + for key, value in conf.items(): + self.conf_set(key, value) + + def get_fscid(self): + """ + Return the file system id for this fs client. + """ + self.require_state("mounted") + with nogil: + ret = ceph_get_fs_cid(self.cluster) + if ret < 0: + raise make_ex(ret, "error fetching fscid") + return ret + + def get_addrs(self): + """ + Get associated client addresses with this RADOS session. + """ + self.require_state("mounted") + + cdef: + char* addrs = NULL + + try: + + with nogil: + ret = ceph_getaddrs(self.cluster, &addrs) + if ret: + raise make_ex(ret, "error calling getaddrs") + + return decode_cstr(addrs) + finally: + ceph_buffer_free(addrs) + + + def conf_read_file(self, conffile=None): + """ + Load the ceph configuration from the specified config file. + + :param conffile str opt: the path to ceph.conf to override the default settings + """ + conffile = cstr(conffile, 'conffile', opt=True) + cdef: + char *_conffile = opt_str(conffile) + with nogil: + ret = ceph_conf_read_file(self.cluster, _conffile) + if ret != 0: + raise make_ex(ret, "error calling conf_read_file") + + def conf_parse_argv(self, argv): + """ + Parse the command line arguments and load the configuration parameters. + + :param argv: the argument list + """ + self.require_state("configuring") + cargv = cstr_list(argv, 'argv') + cdef: + int _argc = len(argv) + char **_argv = to_bytes_array(cargv) + + try: + with nogil: + ret = ceph_conf_parse_argv(self.cluster, _argc, + _argv) + if ret != 0: + raise make_ex(ret, "error calling conf_parse_argv") + finally: + free(_argv) + + def shutdown(self): + """ + Unmount and destroy the ceph mount handle. + """ + if self.state in ["initialized", "mounted"]: + with nogil: + ceph_shutdown(self.cluster) + self.state = "shutdown" + + def __enter__(self): + self.mount() + return self + + def __exit__(self, type_, value, traceback): + self.shutdown() + return False + + def __dealloc__(self): + self.shutdown() + + def version(self): + """ + Get the version number of the ``libcephfs`` C library. + + :returns: a tuple of ``(major, minor, extra)`` components of the + libcephfs version + """ + cdef: + int major = 0 + int minor = 0 + int extra = 0 + with nogil: + ceph_version(&major, &minor, &extra) + return (major, minor, extra) + + def conf_get(self, option): + """ + Gets the configuration value as a string. 
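The state machine above ("configuring" -> "initialized" -> "mounted") is easiest to drive through the context-manager protocol, which mounts on entry and shuts down on exit. A hedged sketch assuming a reachable cluster and the default conffile and keyring:

import cephfs

with cephfs.LibCephFS(conffile='') as fs:  # '' reads the default conffiles
    print(fs.version())       # (major, minor, extra)
    print(fs.get_fscid())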
+ + :param option: the config option to get + """ + self.require_state("configuring", "initialized", "mounted") + + option = cstr(option, 'option') + cdef: + char *_option = option + size_t length = 20 + char *ret_buf = NULL + + try: + while True: + ret_buf = realloc_chk(ret_buf, length) + with nogil: + ret = ceph_conf_get(self.cluster, _option, ret_buf, length) + if ret == 0: + return decode_cstr(ret_buf) + elif ret == -CEPHFS_ENAMETOOLONG: + length = length * 2 + elif ret == -CEPHFS_ENOENT: + return None + else: + raise make_ex(ret, "error calling conf_get") + finally: + free(ret_buf) + + def conf_set(self, option, val): + """ + Sets a configuration value from a string. + + :param option: the configuration option to set + :param value: the value of the configuration option to set + """ + self.require_state("configuring", "initialized", "mounted") + + option = cstr(option, 'option') + val = cstr(val, 'val') + cdef: + char *_option = option + char *_val = val + + with nogil: + ret = ceph_conf_set(self.cluster, _option, _val) + if ret != 0: + raise make_ex(ret, "error calling conf_set") + + def set_mount_timeout(self, timeout): + """ + Set mount timeout + + :param timeout: mount timeout + """ + self.require_state("configuring", "initialized") + if not isinstance(timeout, int): + raise TypeError('timeout must be an integer') + if timeout < 0: + raise make_ex(CEPHFS_EINVAL, 'timeout must be greater than or equal to 0') + cdef: + uint32_t _timeout = timeout + with nogil: + ret = ceph_set_mount_timeout(self.cluster, _timeout) + if ret != 0: + raise make_ex(ret, "error setting mount timeout") + + def init(self): + """ + Initialize the filesystem client (but do not mount the filesystem yet) + """ + self.require_state("configuring") + with nogil: + ret = ceph_init(self.cluster) + if ret != 0: + raise make_ex(ret, "error calling ceph_init") + self.state = "initialized" + + def mount(self, mount_root=None, filesystem_name=None): + """ + Perform a mount using the path for the root of the mount. + """ + if self.state == "configuring": + self.init() + self.require_state("initialized") + + # Configure which filesystem to mount if one was specified + if filesystem_name is None: + filesystem_name = b"" + else: + filesystem_name = cstr(filesystem_name, 'filesystem_name') + cdef: + char *_filesystem_name = filesystem_name + if filesystem_name: + with nogil: + ret = ceph_select_filesystem(self.cluster, + _filesystem_name) + if ret != 0: + raise make_ex(ret, "error calling ceph_select_filesystem") + + # Prepare mount_root argument, default to "/" + root = b"/" if mount_root is None else mount_root + cdef: + char *_mount_root = root + + with nogil: + ret = ceph_mount(self.cluster, _mount_root) + if ret != 0: + raise make_ex(ret, "error calling ceph_mount") + self.state = "mounted" + + def unmount(self): + """ + Unmount a mount handle. + """ + self.require_state("mounted") + with nogil: + ret = ceph_unmount(self.cluster) + if ret != 0: + raise make_ex(ret, "error calling ceph_unmount") + self.state = "initialized" + + def abort_conn(self): + """ + Abort mds connections. 
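A short round trip through the configuration accessors; log_file is a real string-typed ceph option, used here only to show that conf_get() hands back what conf_set() stored (hedged, since values pass through ceph's option parsing).

fs = cephfs.LibCephFS(conffile='')
fs.conf_set('log_file', '/tmp/cephfs-client.log')
print(fs.conf_get('log_file'))   # '/tmp/cephfs-client.log'
fs.shutdown()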
+ """ + self.require_state("mounted") + with nogil: + ret = ceph_abort_conn(self.cluster) + if ret != 0: + raise make_ex(ret, "error calling ceph_abort_conn") + self.state = "initialized" + + def get_instance_id(self): + """ + Get a global id for current instance + """ + self.require_state("initialized", "mounted") + with nogil: + ret = ceph_get_instance_id(self.cluster) + return ret; + + def statfs(self, path): + """ + Perform a statfs on the ceph file system. This call fills in file system wide statistics + into the passed in buffer. + + :param path: any path within the mounted filesystem + """ + self.require_state("mounted") + path = cstr(path, 'path') + cdef: + char* _path = path + statvfs statbuf + + with nogil: + ret = ceph_statfs(self.cluster, _path, &statbuf) + if ret < 0: + raise make_ex(ret, "statfs failed: %s" % path) + return {'f_bsize': statbuf.f_bsize, + 'f_frsize': statbuf.f_frsize, + 'f_blocks': statbuf.f_blocks, + 'f_bfree': statbuf.f_bfree, + 'f_bavail': statbuf.f_bavail, + 'f_files': statbuf.f_files, + 'f_ffree': statbuf.f_ffree, + 'f_favail': statbuf.f_favail, + 'f_fsid': statbuf.f_fsid, + 'f_flag': statbuf.f_flag, + 'f_namemax': statbuf.f_namemax} + + def sync_fs(self): + """ + Synchronize all filesystem data to persistent media + """ + self.require_state("mounted") + with nogil: + ret = ceph_sync_fs(self.cluster) + if ret < 0: + raise make_ex(ret, "sync_fs failed") + + def fsync(self, int fd, int syncdataonly): + """ + Synchronize an open file to persistent media. + + :param fd: the file descriptor of the file to sync. + :param syncdataonly: a boolean whether to synchronize metadata and data (0) + or just data (1). + """ + self.require_state("mounted") + with nogil: + ret = ceph_fsync(self.cluster, fd, syncdataonly) + if ret < 0: + raise make_ex(ret, "fsync failed") + + def lazyio(self, fd, enable): + """ + Enable/disable lazyio for the file. + + :param fd: the file descriptor of the file for which to enable lazio. + :param enable: a boolean to enable lazyio or disable lazyio. + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(enable, int): + raise TypeError('enable must be an int') + + cdef: + int _fd = fd + int _enable = enable + + with nogil: + ret = ceph_lazyio(self.cluster, _fd, _enable) + if ret < 0: + raise make_ex(ret, "lazyio failed") + + def lazyio_propagate(self, fd, offset, count): + """ + Flushes the write buffer for the file thereby propogating the buffered write to the file. + + :param fd: the file descriptor of the file to sync. + :param offset: the byte range starting. + :param count: the number of bytes starting from offset. + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + if not isinstance(count, int): + raise TypeError('count must be an int') + + cdef: + int _fd = fd + int64_t _offset = offset + size_t _count = count + + with nogil: + ret = ceph_lazyio_propagate(self.cluster, _fd, _offset, _count) + if ret < 0: + raise make_ex(ret, "lazyio_propagate failed") + + def lazyio_synchronize(self, fd, offset, count): + """ + Flushes the write buffer for the file and invalidate the read cache. This allows a + subsequent read operation to read and cache data directly from the file and hence + everyone's propagated writes would be visible. + + :param fd: the file descriptor of the file to sync. + :param offset: the byte range starting. 
+ :param count: the number of bytes starting from offset. + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + if not isinstance(count, int): + raise TypeError('count must be an int') + + cdef: + int _fd = fd + int64_t _offset = offset + size_t _count = count + + with nogil: + ret = ceph_lazyio_synchronize(self.cluster, _fd, _offset, _count) + if ret < 0: + raise make_ex(ret, "lazyio_synchronize failed") + + def fallocate(self, fd, offset, length, mode=0): + """ + Preallocate or release disk space for the file for the byte range. + + :param fd: the file descriptor of the file to fallocate. + :param mode: the flags determines the operation to be performed on the given + range. default operation (0) allocate and initialize to zero + the file in the byte range, and the file size will be changed + if offset + length is greater than the file size. if the + FALLOC_FL_KEEP_SIZE flag is specified in the mode, the file size + will not be changed. if the FALLOC_FL_PUNCH_HOLE flag is specified + in the mode, the operation is deallocate space and zero the byte range. + :param offset: the byte range starting. + :param length: the length of the range. + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(mode, int): + raise TypeError('mode must be an int') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + if not isinstance(length, int): + raise TypeError('length must be an int') + + cdef: + int _fd = fd + int _mode = mode + int64_t _offset = offset + int64_t _length = length + + with nogil: + ret = ceph_fallocate(self.cluster, _fd, _mode, _offset, _length) + if ret < 0: + raise make_ex(ret, "fallocate failed") + + def getcwd(self) -> bytes: + """ + Get the current working directory. + + :returns: the path to the current working directory + """ + self.require_state("mounted") + with nogil: + ret = ceph_getcwd(self.cluster) + return ret + + def chdir(self, path): + """ + Change the current working directory. + + :param path: the path to the working directory to change into. + """ + self.require_state("mounted") + + path = cstr(path, 'path') + cdef char* _path = path + with nogil: + ret = ceph_chdir(self.cluster, _path) + if ret < 0: + raise make_ex(ret, "chdir failed") + + def opendir(self, path) -> DirResult: + """ + Open the given directory. + + :param path: the path name of the directory to open. Must be either an absolute path + or a path relative to the current working directory. + :returns: the open directory stream handle + """ + self.require_state("mounted") + + path = cstr(path, 'path') + cdef: + char* _path = path + ceph_dir_result* handle + with nogil: + ret = ceph_opendir(self.cluster, _path, &handle); + if ret < 0: + raise make_ex(ret, "opendir failed at {}".format(path.decode('utf-8'))) + d = DirResult() + d.lib = self + d.handle = handle + return d + + def readdir(self, DirResult handle) -> Optional[DirEntry]: + """ + Get the next entry in an open directory. + + :param handle: the open directory stream handle + :returns: the next directory entry or None if at the end of the + directory (or the directory is empty. This pointer + should not be freed by the caller, and is only safe to + access between return and the next call to readdir or + closedir. 
+ """ + self.require_state("mounted") + + return handle.readdir() + + def closedir(self, DirResult handle): + """ + Close the open directory. + + :param handle: the open directory stream handle + """ + self.require_state("mounted") + + return handle.close() + + def opensnapdiff(self, root_path, rel_path, snap1name, snap2name) -> SnapDiffHandle: + """ + Open the given directory. + + :param path: the path name of the directory to open. Must be either an absolute path + or a path relative to the current working directory. + :returns: the open directory stream handle + """ + self.require_state("mounted") + + h = SnapDiffHandle(self) + root = cstr(root_path, 'root') + relp = cstr(rel_path, 'relp') + snap1 = cstr(snap1name, 'snap1') + snap2 = cstr(snap2name, 'snap2') + cdef: + char* _root = root + char* _relp = relp + char* _snap1 = snap1 + char* _snap2 = snap2 + with nogil: + ret = ceph_open_snapdiff(self.cluster, _root, _relp, _snap1, _snap2, &h.handle); + if ret < 0: + raise make_ex(ret, "open_snapdiff failed for {} vs. {}" + .format(snap1.decode('utf-8'), snap2.decode('utf-8'))) + h.opened = 1 + return h + + def rewinddir(self, DirResult handle): + """ + Rewind the directory stream to the beginning of the directory. + + :param handle: the open directory stream handle + """ + return handle.rewinddir() + + def telldir(self, DirResult handle): + """ + Get the current position of a directory stream. + + :param handle: the open directory stream handle + :return value: The position of the directory stream. Note that the offsets + returned by ceph_telldir do not have a particular order (cannot + be compared with inequality). + """ + return handle.telldir() + + def seekdir(self, DirResult handle, offset): + """ + Move the directory stream to a position specified by the given offset. + + :param handle: the open directory stream handle + :param offset: the position to move the directory stream to. This offset should be + a value returned by telldir. Note that this value does not refer to + the nth entry in a directory, and can not be manipulated with plus + or minus. + """ + return handle.seekdir(offset) + + def mkdir(self, path, mode): + """ + Create a directory. + + :param path: the path of the directory to create. This must be either an + absolute path or a relative path off of the current working directory. + :param mode: the permissions the directory should have once created. + """ + + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(mode, int): + raise TypeError('mode must be an int') + cdef: + char* _path = path + int _mode = mode + with nogil: + ret = ceph_mkdir(self.cluster, _path, _mode) + if ret < 0: + raise make_ex(ret, "error in mkdir {}".format(path.decode('utf-8'))) + + def mksnap(self, path, name, mode, metadata={}) -> int: + """ + Create a snapshot. + + :param path: path of the directory to snapshot. 
+        :param name: snapshot name
+        :param mode: permission of the snapshot
+        :param metadata: metadata key/value to store with the snapshot
+
+        :raises: :class: `TypeError`
+        :raises: :class: `Error`
+        :returns: 0 on success
+        """
+
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+        name = cstr(name, 'name')
+        if not isinstance(mode, int):
+            raise TypeError('mode must be an int')
+        if not isinstance(metadata, dict):
+            raise TypeError('metadata must be a dictionary')
+        md = {}
+        for key, value in metadata.items():
+            if not isinstance(key, str) or not isinstance(value, str):
+                raise TypeError('metadata keys and values must be strings')
+            md[key.encode('utf-8')] = value.encode('utf-8')
+        cdef:
+            char* _path = path
+            char* _name = name
+            int _mode = mode
+            size_t nr = len(md)
+            snap_metadata *_snap_meta = <snap_metadata *>malloc(nr * sizeof(snap_metadata))
+        if nr and _snap_meta == NULL:
+            raise MemoryError("malloc failed")
+        i = 0
+        for key, value in md.items():
+            _snap_meta[i] = snap_metadata(key, value)
+            i += 1
+        with nogil:
+            ret = ceph_mksnap(self.cluster, _path, _name, _mode, _snap_meta, nr)
+        free(_snap_meta)
+        if ret < 0:
+            raise make_ex(ret, "mksnap error")
+        return 0
+
+    def rmsnap(self, path, name) -> int:
+        """
+        Remove a snapshot.
+
+        :param path: path of the directory for removing snapshot
+        :param name: snapshot name
+
+        :raises: :class: `Error`
+        :returns: 0 on success
+        """
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+        name = cstr(name, 'name')
+        cdef:
+            char* _path = path
+            char* _name = name
+        with nogil:
+            ret = ceph_rmsnap(self.cluster, _path, _name)
+        if ret < 0:
+            raise make_ex(ret, "rmsnap error")
+        return 0
+
+    def snap_info(self, path) -> Dict[str, Any]:
+        """
+        Fetch snapshot info
+
+        :param path: snapshot path
+
+        :raises: :class: `Error`
+        :returns: snapshot metadata
+        """
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+        cdef:
+            char* _path = path
+            snap_info info
+        with nogil:
+            ret = ceph_get_snap_info(self.cluster, _path, &info)
+        if ret < 0:
+            raise make_ex(ret, "snap_info error")
+        md = {}
+        if info.nr_snap_metadata:
+            md = {snap_meta.key.decode('utf-8'): snap_meta.value.decode('utf-8') for snap_meta in
+                  info.snap_metadata[:info.nr_snap_metadata]}
+            ceph_free_snap_info_buffer(&info)
+        return {'id': info.id, 'metadata': md}
+
+    def chmod(self, path, mode) -> None:
+        """
+        Change the mode of a file or directory.
+
+        :param path: the path of the file or directory to change. This must be
+                     either an absolute path or a relative path off of the current
+                     working directory.
+        :param mode: the permissions to set.
+        """
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+        if not isinstance(mode, int):
+            raise TypeError('mode must be an int')
+        cdef:
+            char* _path = path
+            int _mode = mode
+        with nogil:
+            ret = ceph_chmod(self.cluster, _path, _mode)
+        if ret < 0:
+            raise make_ex(ret, "error in chmod {}".format(path.decode('utf-8')))
+
+    def lchmod(self, path, mode) -> None:
+        """
+        Change file mode. If the path is a symbolic link, it won't be dereferenced.
+
+        :param path: the path of the file. This must be either an absolute path or
+                     a relative path off of the current working directory.
+        :param mode: the permissions to be set.
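+
+        Illustrative only (assumes a mounted LibCephFS instance ``fs``)::
+
+            fs.symlink(b'/hello.txt', b'/hello-link')
+            fs.lchmod(b'/hello-link', 0o600)  # affects the link, not /hello.txt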
+ """ + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(mode, int): + raise TypeError('mode must be an int') + cdef: + char* _path = path + int _mode = mode + with nogil: + ret = ceph_lchmod(self.cluster, _path, _mode) + if ret < 0: + raise make_ex(ret, "error in chmod {}".format(path.decode('utf-8'))) + + def fchmod(self, fd, mode) : + """ + Change file mode based on fd. + + :param fd: the file descriptor of the file to change file mode + :param mode: the permissions to be set. + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(mode, int): + raise TypeError('mode must be an int') + cdef: + int _fd = fd + int _mode = mode + with nogil: + ret = ceph_fchmod(self.cluster, _fd, _mode) + if ret < 0: + raise make_ex(ret, "error in fchmod") + + def chown(self, path, uid, gid, follow_symlink=True): + """ + Change directory ownership + + :param path: the path of the directory to change. + :param uid: the uid to set + :param gid: the gid to set + :param follow_symlink: perform the operation on the target file if @path + is a symbolic link (default) + """ + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(uid, int): + raise TypeError('uid must be an int') + elif not isinstance(gid, int): + raise TypeError('gid must be an int') + + cdef: + char* _path = path + int _uid = uid + int _gid = gid + if follow_symlink: + with nogil: + ret = ceph_chown(self.cluster, _path, _uid, _gid) + else: + with nogil: + ret = ceph_lchown(self.cluster, _path, _uid, _gid) + if ret < 0: + raise make_ex(ret, "error in chown {}".format(path.decode('utf-8'))) + + def lchown(self, path, uid, gid): + """ + Change ownership of a symbolic link + + :param path: the path of the symbolic link to change. + :param uid: the uid to set + :param gid: the gid to set + """ + self.chown(path, uid, gid, follow_symlink=False) + + def fchown(self, fd, uid, gid): + """ + Change file ownership + + :param fd: the file descriptor of the file to change ownership + :param uid: the uid to set + :param gid: the gid to set + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(uid, int): + raise TypeError('uid must be an int') + elif not isinstance(gid, int): + raise TypeError('gid must be an int') + + cdef: + int _fd = fd + int _uid = uid + int _gid = gid + with nogil: + ret = ceph_fchown(self.cluster, _fd, _uid, _gid) + if ret < 0: + raise make_ex(ret, "error in fchown") + + def mkdirs(self, path, mode): + """ + Create multiple directories at once. + + :param path: the full path of directories and sub-directories that should + be created. + :param mode: the permissions the directory should have once created + """ + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(mode, int): + raise TypeError('mode must be an int') + cdef: + char* _path = path + int _mode = mode + + with nogil: + ret = ceph_mkdirs(self.cluster, _path, _mode) + if ret < 0: + raise make_ex(ret, "error in mkdirs {}".format(path.decode('utf-8'))) + + def rmdir(self, path): + """ + Remove a directory. + + :param path: the path of the directory to remove. + """ + self.require_state("mounted") + path = cstr(path, 'path') + cdef char* _path = path + with nogil: + ret = ceph_rmdir(self.cluster, _path) + if ret < 0: + raise make_ex(ret, "error in rmdir {}".format(path.decode('utf-8'))) + + def open(self, path, flags, mode=0): + """ + Create and/or open a file. 
+
+        :param path: the path of the file to open. If the flags parameter includes O_CREAT,
+                     the file will first be created before opening.
+        :param flags: set of option masks that control how the file is created/opened.
+        :param mode: the permissions to place on the file if the file does not exist and O_CREAT
+                     is specified in the flags.
+        """
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+
+        if not isinstance(mode, int):
+            raise TypeError('mode must be an int')
+        if isinstance(flags, str):
+            cephfs_flags = 0
+            if flags == '':
+                cephfs_flags = os.O_RDONLY
+            else:
+                access_flags = 0
+                for c in flags:
+                    if c == 'r':
+                        access_flags = 1
+                    elif c == 'w':
+                        access_flags = 2
+                        cephfs_flags |= os.O_TRUNC | os.O_CREAT
+                    elif access_flags > 0 and c == '+':
+                        access_flags = 3
+                    else:
+                        raise make_ex(CEPHFS_EOPNOTSUPP,
+                                      "open flags doesn't support %s" % c)
+
+                if access_flags == 1:
+                    cephfs_flags |= os.O_RDONLY
+                elif access_flags == 2:
+                    cephfs_flags |= os.O_WRONLY
+                else:
+                    cephfs_flags |= os.O_RDWR
+
+        elif isinstance(flags, int):
+            cephfs_flags = flags
+        else:
+            raise TypeError("flags must be a string or an integer")
+
+        cdef:
+            char* _path = path
+            int _flags = cephfs_flags
+            int _mode = mode
+
+        with nogil:
+            ret = ceph_open(self.cluster, _path, _flags, _mode)
+        if ret < 0:
+            raise make_ex(ret, "error in open {}".format(path.decode('utf-8')))
+        return ret
+
+    def close(self, fd):
+        """
+        Close the open file.
+
+        :param fd: the file descriptor referring to the open file.
+        """
+
+        self.require_state("mounted")
+        if not isinstance(fd, int):
+            raise TypeError('fd must be an int')
+        cdef int _fd = fd
+        with nogil:
+            ret = ceph_close(self.cluster, _fd)
+        if ret < 0:
+            raise make_ex(ret, "error in close")
+
+    def read(self, fd, offset, l):
+        """
+        Read data from the file.
+
+        :param fd: the file descriptor of the open file to read from.
+        :param offset: the offset in the file to read from. If this value is negative, the
+                       function reads from the current offset of the file descriptor.
+        :param l: the number of bytes to read.
+        """
+        self.require_state("mounted")
+        if not isinstance(offset, int):
+            raise TypeError('offset must be an int')
+        if not isinstance(l, int):
+            raise TypeError('l must be an int')
+        if not isinstance(fd, int):
+            raise TypeError('fd must be an int')
+        cdef:
+            int _fd = fd
+            int64_t _offset = offset
+            int64_t _length = l
+
+            char *ret_buf
+            PyObject* ret_s = NULL
+
+        ret_s = PyBytes_FromStringAndSize(NULL, _length)
+        try:
+            ret_buf = PyBytes_AsString(ret_s)
+            with nogil:
+                ret = ceph_read(self.cluster, _fd, ret_buf, _length, _offset)
+            if ret < 0:
+                raise make_ex(ret, "error in read")
+
+            if ret != _length:
+                _PyBytes_Resize(&ret_s, ret)
+
+            return <object>ret_s
+        finally:
+            # We DECREF unconditionally: the cast to object above will have
+            # INCREFed if necessary. This also takes care of exceptions,
+            # including if _PyBytes_Resize fails (that will free the string
+            # itself and set ret_s to NULL, hence XDECREF).
+            ref.Py_XDECREF(ret_s)
+
+    def preadv(self, fd, buffers, offset):
+        """
+        Read data from the file into the given mutable buffers.
+
+        :param fd: the file descriptor of the open file to read from
+        :param buffers: the list of mutable buffers (bytearrays) to read into
+        :param offset: the offset in the file to read from. If this value is negative, the
+                       function reads from the current offset of the file descriptor.
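+
+        A minimal sketch (assumes ``fd`` was returned by :func:`open`)::
+
+            buf = bytearray(16)
+            bytes_read = fs.preadv(fd, [buf], 0)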
+ """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(buffers, list): + raise TypeError('buffers must be a list') + for buf in buffers: + if not isinstance(buf, bytearray): + raise TypeError('buffers must be a list of bytes') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + + cdef: + int _fd = fd + int _iovcnt = len(buffers) + int64_t _offset = offset + iovec *_iov = to_iovec(buffers) + try: + with nogil: + ret = ceph_preadv(self.cluster, _fd, _iov, _iovcnt, _offset) + if ret < 0: + raise make_ex(ret, "error in preadv") + return ret + finally: + free(_iov) + + def write(self, fd, buf, offset): + """ + Write data to a file. + + :param fd: the file descriptor of the open file to write to + :param buf: the bytes to write to the file + :param offset: the offset of the file write into. If this value is negative, the + function writes to the current offset of the file descriptor. + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(buf, bytes): + raise TypeError('buf must be a bytes') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + + cdef: + int _fd = fd + char *_data = buf + int64_t _offset = offset + + size_t length = len(buf) + + with nogil: + ret = ceph_write(self.cluster, _fd, _data, length, _offset) + if ret < 0: + raise make_ex(ret, "error in write") + return ret + + def pwritev(self, fd, buffers, offset): + """ + Write data to a file. + + :param fd: the file descriptor of the open file to write to + :param buffers: the list of byte object to write to the file + :param offset: the offset of the file write into. If this value is negative, the + function writes to the current offset of the file descriptor. + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(buffers, list): + raise TypeError('buffers must be a list') + for buf in buffers: + if not isinstance(buf, bytes): + raise TypeError('buffers must be a list of bytes') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + + cdef: + int _fd = fd + int _iovcnt = len(buffers) + int64_t _offset = offset + iovec *_iov = to_iovec(buffers) + try: + with nogil: + ret = ceph_pwritev(self.cluster, _fd, _iov, _iovcnt, _offset) + if ret < 0: + raise make_ex(ret, "error in pwritev") + return ret + finally: + free(_iov) + + def flock(self, fd, operation, owner): + """ + Apply or remove an advisory lock. + + :param fd: the open file descriptor to change advisory lock. + :param operation: the advisory lock operation to be performed on the file + :param owner: the user-supplied owner identifier (an arbitrary integer) + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(operation, int): + raise TypeError('operation must be an int') + + cdef: + int _fd = fd + int _op = operation + uint64_t _owner = owner + + with nogil: + ret = ceph_flock(self.cluster, _fd, _op, _owner) + if ret < 0: + raise make_ex(ret, "error in write") + return ret + + def truncate(self, path, size): + """ + Truncate the file to the given size. If this operation causes the + file to expand, the empty bytes will be filled in with zeros. + + :param path: the path to the file to truncate. + :param size: the new size of the file. 
+ """ + + if not isinstance(size, int): + raise TypeError('size must be a int') + + statx_dict = dict() + statx_dict["size"] = size + self.setattrx(path, statx_dict, CEPH_SETATTR_SIZE, AT_SYMLINK_NOFOLLOW) + + def ftruncate(self, fd, size): + """ + Truncate the file to the given size. If this operation causes the + file to expand, the empty bytes will be filled in with zeros. + + :param path: the path to the file to truncate. + :param size: the new size of the file. + """ + + if not isinstance(size, int): + raise TypeError('size must be a int') + + statx_dict = dict() + statx_dict["size"] = size + self.fsetattrx(fd, statx_dict, CEPH_SETATTR_SIZE) + + def mknod(self, path, mode, rdev=0): + """ + Make a block or character special file. + + :param path: the path to the special file. + :param mode: the permissions to use and the type of special file. The type can be + one of stat.S_IFREG, stat.S_IFCHR, stat.S_IFBLK, stat.S_IFIFO. Both + should be combined using bitwise OR. + :param rdev: If the file type is stat.S_IFCHR or stat.S_IFBLK then this parameter + specifies the major and minor numbers of the newly created device + special file. Otherwise, it is ignored. + """ + self.require_state("mounted") + path = cstr(path, 'path') + + if not isinstance(mode, int): + raise TypeError('mode must be an int') + if not isinstance(rdev, int): + raise TypeError('rdev must be an int') + + cdef: + char* _path = path + mode_t _mode = mode + dev_t _rdev = rdev + + with nogil: + ret = ceph_mknod(self.cluster, _path, _mode, _rdev) + if ret < 0: + raise make_ex(ret, "error in mknod {}".format(path.decode('utf-8'))) + + def getxattr(self, path, name, size=255, follow_symlink=True): + """ + Get an extended attribute. + + :param path: the path to the file + :param name: the name of the extended attribute to get + :param size: the size of the pre-allocated buffer + """ + self.require_state("mounted") + + path = cstr(path, 'path') + name = cstr(name, 'name') + + cdef: + char* _path = path + char* _name = name + + size_t ret_length = size + char *ret_buf = NULL + + try: + ret_buf = realloc_chk(ret_buf, ret_length) + if follow_symlink: + with nogil: + ret = ceph_getxattr(self.cluster, _path, _name, ret_buf, + ret_length) + else: + with nogil: + ret = ceph_lgetxattr(self.cluster, _path, _name, ret_buf, + ret_length) + + if ret < 0: + raise make_ex(ret, "error in getxattr") + + return ret_buf[:ret] + finally: + free(ret_buf) + + def fgetxattr(self, fd, name, size=255): + """ + Get an extended attribute given the fd of a file. + + :param fd: the open file descriptor referring to the file + :param name: the name of the extended attribute to get + :param size: the size of the pre-allocated buffer + """ + self.require_state("mounted") + + if not isinstance(fd, int): + raise TypeError('fd must be an int') + name = cstr(name, 'name') + + cdef: + int _fd = fd + char* _name = name + + size_t ret_length = size + char *ret_buf = NULL + + try: + ret_buf = realloc_chk(ret_buf, ret_length) + with nogil: + ret = ceph_fgetxattr(self.cluster, _fd, _name, ret_buf, + ret_length) + + if ret < 0: + raise make_ex(ret, "error in fgetxattr") + + return ret_buf[:ret] + finally: + free(ret_buf) + + def lgetxattr(self, path, name, size=255): + """ + Get an extended attribute without following symbolic links. This + function is identical to ceph_getxattr, but if the path refers to + a symbolic link, we get the extended attributes of the symlink + rather than the attributes of the file it points to. 
+ + :param path: the path to the file + :param name: the name of the extended attribute to get + :param size: the size of the pre-allocated buffer + """ + + return self.getxattr(path, name, size=size, follow_symlink=False) + + def setxattr(self, path, name, value, flags, follow_symlink=True): + """ + Set an extended attribute on a file. + + :param path: the path to the file. + :param name: the name of the extended attribute to set. + :param value: the bytes of the extended attribute value + """ + self.require_state("mounted") + + name = cstr(name, 'name') + path = cstr(path, 'path') + if not isinstance(flags, int): + raise TypeError('flags must be a int') + if not isinstance(value, bytes): + raise TypeError('value must be a bytes') + + cdef: + char *_path = path + char *_name = name + char *_value = value + size_t _value_len = len(value) + int _flags = flags + + if follow_symlink: + with nogil: + ret = ceph_setxattr(self.cluster, _path, _name, + _value, _value_len, _flags) + else: + with nogil: + ret = ceph_lsetxattr(self.cluster, _path, _name, + _value, _value_len, _flags) + + if ret < 0: + raise make_ex(ret, "error in setxattr") + + def fsetxattr(self, fd, name, value, flags): + """ + Set an extended attribute on a file. + + :param fd: the open file descriptor referring to the file. + :param name: the name of the extended attribute to set. + :param value: the bytes of the extended attribute value + """ + self.require_state("mounted") + + name = cstr(name, 'name') + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(flags, int): + raise TypeError('flags must be a int') + if not isinstance(value, bytes): + raise TypeError('value must be a bytes') + + cdef: + int _fd = fd + char *_name = name + char *_value = value + size_t _value_len = len(value) + int _flags = flags + + with nogil: + ret = ceph_fsetxattr(self.cluster, _fd, _name, + _value, _value_len, _flags) + if ret < 0: + raise make_ex(ret, "error in fsetxattr") + + def lsetxattr(self, path, name, value, flags): + """ + Set an extended attribute on a file but do not follow symbolic link. + + :param path: the path to the file. + :param name: the name of the extended attribute to set. + :param value: the bytes of the extended attribute value + """ + + self.setxattr(path, name, value, flags, follow_symlink=False) + + def removexattr(self, path, name, follow_symlink=True): + """ + Remove an extended attribute of a file. + + :param path: path of the file. + :param name: name of the extended attribute to remove. + """ + self.require_state("mounted") + + name = cstr(name, 'name') + path = cstr(path, 'path') + + cdef: + char *_path = path + char *_name = name + + if follow_symlink: + with nogil: + ret = ceph_removexattr(self.cluster, _path, _name) + else: + with nogil: + ret = ceph_lremovexattr(self.cluster, _path, _name) + + if ret < 0: + raise make_ex(ret, "error in removexattr") + + def fremovexattr(self, fd, name): + """ + Remove an extended attribute of a file. + + :param fd: the open file descriptor referring to the file. + :param name: name of the extended attribute to remove. + """ + self.require_state("mounted") + + if not isinstance(fd, int): + raise TypeError('fd must be an int') + name = cstr(name, 'name') + + cdef: + int _fd = fd + char *_name = name + + with nogil: + ret = ceph_fremovexattr(self.cluster, _fd, _name) + if ret < 0: + raise make_ex(ret, "error in fremovexattr") + + def lremovexattr(self, path, name): + """ + Remove an extended attribute of a file but do not follow symbolic link. 
+ + :param path: path of the file. + :param name: name of the extended attribute to remove. + """ + self.removexattr(path, name, follow_symlink=False) + + def listxattr(self, path, size=65536, follow_symlink=True): + """ + List the extended attribute keys set on a file. + + :param path: path of the file. + :param size: the size of list buffer to be filled with extended attribute keys. + """ + self.require_state("mounted") + + path = cstr(path, 'path') + + cdef: + char *_path = path + char *ret_buf = NULL + size_t ret_length = size + + try: + ret_buf = realloc_chk(ret_buf, ret_length) + if follow_symlink: + with nogil: + ret = ceph_listxattr(self.cluster, _path, ret_buf, ret_length) + else: + with nogil: + ret = ceph_llistxattr(self.cluster, _path, ret_buf, ret_length) + + if ret < 0: + raise make_ex(ret, "error in listxattr") + + return ret, ret_buf[:ret] + finally: + free(ret_buf) + + def flistxattr(self, fd, size=65536): + """ + List the extended attribute keys set on a file. + + :param fd: the open file descriptor referring to the file. + :param size: the size of list buffer to be filled with extended attribute keys. + """ + self.require_state("mounted") + + if not isinstance(fd, int): + raise TypeError('fd must be an int') + + cdef: + int _fd = fd + char *ret_buf = NULL + size_t ret_length = size + + try: + ret_buf = realloc_chk(ret_buf, ret_length) + with nogil: + ret = ceph_flistxattr(self.cluster, _fd, ret_buf, ret_length) + + if ret < 0: + raise make_ex(ret, "error in flistxattr") + + return ret, ret_buf[:ret] + finally: + free(ret_buf) + + def llistxattr(self, path, size=65536): + """ + List the extended attribute keys set on a file but do not follow symbolic link. + + :param path: path of the file. + :param size: the size of list buffer to be filled with extended attribute keys. + """ + + return self.listxattr(path, size=size, follow_symlink=False) + + def stat(self, path, follow_symlink=True): + """ + Get a file's extended statistics and attributes. + + :param path: the file or directory to get the statistics of. + """ + self.require_state("mounted") + path = cstr(path, 'path') + + cdef: + char* _path = path + statx stx + + if follow_symlink: + with nogil: + ret = ceph_statx(self.cluster, _path, &stx, + CEPH_STATX_BASIC_STATS_CDEF, 0) + else: + with nogil: + ret = ceph_statx(self.cluster, _path, &stx, + CEPH_STATX_BASIC_STATS_CDEF, AT_SYMLINK_NOFOLLOW_CDEF) + + if ret < 0: + raise make_ex(ret, "error in stat: {}".format(path.decode('utf-8'))) + return StatResult(st_dev=stx.stx_dev, st_ino=stx.stx_ino, + st_mode=stx.stx_mode, st_nlink=stx.stx_nlink, + st_uid=stx.stx_uid, st_gid=stx.stx_gid, + st_rdev=stx.stx_rdev, st_size=stx.stx_size, + st_blksize=stx.stx_blksize, + st_blocks=stx.stx_blocks, + st_atime=datetime.fromtimestamp(stx.stx_atime.tv_sec), + st_mtime=datetime.fromtimestamp(stx.stx_mtime.tv_sec), + st_ctime=datetime.fromtimestamp(stx.stx_ctime.tv_sec)) + + def lstat(self, path): + """ + Get a file's extended statistics and attributes. If the file is a + symbolic link, return the information of the link itself rather than + the information of the file it points to. + + :param path: the file or directory to get the statistics of. + """ + return self.stat(path, follow_symlink=False) + + def fstat(self, fd): + """ + Get an open file's extended statistics and attributes. + + :param fd: the file descriptor of the file to get statistics of. 
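+
+        A minimal sketch (assumes ``fd`` was returned by :func:`open`)::
+
+            st = fs.fstat(fd)
+            print(st.st_size, st.st_mode)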
+ """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + + cdef: + int _fd = fd + statx stx + + with nogil: + ret = ceph_fstatx(self.cluster, _fd, &stx, + CEPH_STATX_BASIC_STATS_CDEF, 0) + if ret < 0: + raise make_ex(ret, "error in fsat") + return StatResult(st_dev=stx.stx_dev, st_ino=stx.stx_ino, + st_mode=stx.stx_mode, st_nlink=stx.stx_nlink, + st_uid=stx.stx_uid, st_gid=stx.stx_gid, + st_rdev=stx.stx_rdev, st_size=stx.stx_size, + st_blksize=stx.stx_blksize, + st_blocks=stx.stx_blocks, + st_atime=datetime.fromtimestamp(stx.stx_atime.tv_sec), + st_mtime=datetime.fromtimestamp(stx.stx_mtime.tv_sec), + st_ctime=datetime.fromtimestamp(stx.stx_ctime.tv_sec)) + + def statx(self, path, mask, flag): + """ + Get a file's extended statistics and attributes. + + :param path: the file or directory to get the statistics of. + :param mask: want bitfield of CEPH_STATX_* flags showing designed attributes. + :param flag: bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW) + """ + + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(mask, int): + raise TypeError('flag must be a int') + if not isinstance(flag, int): + raise TypeError('flag must be a int') + + cdef: + char* _path = path + statx stx + int _mask = mask + int _flag = flag + dict_result = dict() + + with nogil: + ret = ceph_statx(self.cluster, _path, &stx, _mask, _flag) + if ret < 0: + raise make_ex(ret, "error in stat: %s" % path) + + if (_mask & CEPH_STATX_MODE): + dict_result["mode"] = stx.stx_mode + if (_mask & CEPH_STATX_NLINK): + dict_result["nlink"] = stx.stx_nlink + if (_mask & CEPH_STATX_UID): + dict_result["uid"] = stx.stx_uid + if (_mask & CEPH_STATX_GID): + dict_result["gid"] = stx.stx_gid + if (_mask & CEPH_STATX_RDEV): + dict_result["rdev"] = stx.stx_rdev + if (_mask & CEPH_STATX_ATIME): + dict_result["atime"] = datetime.fromtimestamp(stx.stx_atime.tv_sec) + if (_mask & CEPH_STATX_MTIME): + dict_result["mtime"] = datetime.fromtimestamp(stx.stx_mtime.tv_sec) + if (_mask & CEPH_STATX_CTIME): + dict_result["ctime"] = datetime.fromtimestamp(stx.stx_ctime.tv_sec) + if (_mask & CEPH_STATX_INO): + dict_result["ino"] = stx.stx_ino + if (_mask & CEPH_STATX_SIZE): + dict_result["size"] = stx.stx_size + if (_mask & CEPH_STATX_BLOCKS): + dict_result["blocks"] = stx.stx_blocks + if (_mask & CEPH_STATX_BTIME): + dict_result["btime"] = datetime.fromtimestamp(stx.stx_btime.tv_sec) + if (_mask & CEPH_STATX_VERSION): + dict_result["version"] = stx.stx_version + + return dict_result + + def setattrx(self, path, dict_stx, mask, flags): + """ + Set a file's attributes. + + :param path: the path to the file/directory to set the attributes of. + :param mask: a mask of all the CEPH_SETATTR_* values that have been set in the statx struct. + :param stx: a dict of statx structure that must include attribute values to set on the file. 
+ :param flags: mask of AT_* flags (only AT_ATTR_NOFOLLOW is respected for now) + """ + + self.require_state("mounted") + path = cstr(path, 'path') + if not isinstance(dict_stx, dict): + raise TypeError('dict_stx must be a dict') + if not isinstance(mask, int): + raise TypeError('mask must be a int') + if not isinstance(flags, int): + raise TypeError('flags must be a int') + + cdef statx stx + + if (mask & CEPH_SETATTR_MODE): + stx.stx_mode = dict_stx["mode"] + if (mask & CEPH_SETATTR_UID): + stx.stx_uid = dict_stx["uid"] + if (mask & CEPH_SETATTR_GID): + stx.stx_gid = dict_stx["gid"] + if (mask & CEPH_SETATTR_MTIME): + stx.stx_mtime = to_timespec(dict_stx["mtime"].timestamp()) + if (mask & CEPH_SETATTR_ATIME): + stx.stx_atime = to_timespec(dict_stx["atime"].timestamp()) + if (mask & CEPH_SETATTR_CTIME): + stx.stx_ctime = to_timespec(dict_stx["ctime"].timestamp()) + if (mask & CEPH_SETATTR_SIZE): + stx.stx_size = dict_stx["size"] + if (mask & CEPH_SETATTR_BTIME): + stx.stx_btime = to_timespec(dict_stx["btime"].timestamp()) + + cdef: + char* _path = path + int _mask = mask + int _flags = flags + dict_result = dict() + + with nogil: + ret = ceph_setattrx(self.cluster, _path, &stx, _mask, _flags) + if ret < 0: + raise make_ex(ret, "error in setattrx: %s" % path) + + def fsetattrx(self, fd, dict_stx, mask): + """ + Set a file's attributes. + + :param path: the path to the file/directory to set the attributes of. + :param mask: a mask of all the CEPH_SETATTR_* values that have been set in the statx struct. + :param stx: a dict of statx structure that must include attribute values to set on the file. + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be a int') + if not isinstance(dict_stx, dict): + raise TypeError('dict_stx must be a dict') + if not isinstance(mask, int): + raise TypeError('mask must be a int') + + cdef statx stx + + if (mask & CEPH_SETATTR_MODE): + stx.stx_mode = dict_stx["mode"] + if (mask & CEPH_SETATTR_UID): + stx.stx_uid = dict_stx["uid"] + if (mask & CEPH_SETATTR_GID): + stx.stx_gid = dict_stx["gid"] + if (mask & CEPH_SETATTR_MTIME): + stx.stx_mtime = to_timespec(dict_stx["mtime"].timestamp()) + if (mask & CEPH_SETATTR_ATIME): + stx.stx_atime = to_timespec(dict_stx["atime"].timestamp()) + if (mask & CEPH_SETATTR_CTIME): + stx.stx_ctime = to_timespec(dict_stx["ctime"].timestamp()) + if (mask & CEPH_SETATTR_SIZE): + stx.stx_size = dict_stx["size"] + if (mask & CEPH_SETATTR_BTIME): + stx.stx_btime = to_timespec(dict_stx["btime"].timestamp()) + + cdef: + int _fd = fd + int _mask = mask + dict_result = dict() + + with nogil: + ret = ceph_fsetattrx(self.cluster, _fd, &stx, _mask) + if ret < 0: + raise make_ex(ret, "error in fsetattrx") + + def symlink(self, existing, newname): + """ + Creates a symbolic link. + + :param existing: the path to the existing file/directory to link to. + :param newname: the path to the new file/directory to link from. + """ + self.require_state("mounted") + existing = cstr(existing, 'existing') + newname = cstr(newname, 'newname') + cdef: + char* _existing = existing + char* _newname = newname + + with nogil: + ret = ceph_symlink(self.cluster, _existing, _newname) + if ret < 0: + raise make_ex(ret, "error in symlink") + + def link(self, existing, newname): + """ + Create a link. + + :param existing: the path to the existing file/directory to link to. + :param newname: the path to the new file/directory to link from. 
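+
+        Illustrative only (assumes a mounted LibCephFS instance ``fs``)::
+
+            fs.link(b'/hello.txt', b'/hello-hardlink.txt')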
+ """ + + self.require_state("mounted") + existing = cstr(existing, 'existing') + newname = cstr(newname, 'newname') + cdef: + char* _existing = existing + char* _newname = newname + + with nogil: + ret = ceph_link(self.cluster, _existing, _newname) + if ret < 0: + raise make_ex(ret, "error in link") + + def readlink(self, path, size) -> bytes: + """ + Read a symbolic link. + + :param path: the path to the symlink to read + :param size: the length of the buffer + :returns: buffer to hold the path of the file that the symlink points to. + """ + self.require_state("mounted") + path = cstr(path, 'path') + + cdef: + char* _path = path + int64_t _size = size + char *buf = NULL + + try: + buf = realloc_chk(buf, _size) + with nogil: + ret = ceph_readlink(self.cluster, _path, buf, _size) + if ret < 0: + raise make_ex(ret, "error in readlink") + return buf[:ret] + finally: + free(buf) + + def unlink(self, path): + """ + Removes a file, link, or symbolic link. If the file/link has multiple links to it, the + file will not disappear from the namespace until all references to it are removed. + + :param path: the path of the file or link to unlink. + """ + self.require_state("mounted") + path = cstr(path, 'path') + cdef char* _path = path + with nogil: + ret = ceph_unlink(self.cluster, _path) + if ret < 0: + raise make_ex(ret, "error in unlink: {}".format(path.decode('utf-8'))) + + def rename(self, src, dst): + """ + Rename a file or directory. + + :param src: the path to the existing file or directory. + :param dst: the new name of the file or directory. + """ + + self.require_state("mounted") + + src = cstr(src, 'source') + dst = cstr(dst, 'destination') + + cdef: + char* _src = src + char* _dst = dst + + with nogil: + ret = ceph_rename(self.cluster, _src, _dst) + if ret < 0: + raise make_ex(ret, "error in rename {} to {}".format(src.decode( + 'utf-8'), dst.decode('utf-8'))) + + def mds_command(self, mds_spec, args, input_data): + """ + :returns: 3-tuple of output status int, output status string, output data + """ + mds_spec = cstr(mds_spec, 'mds_spec') + args = cstr(args, 'args') + input_data = cstr(input_data, 'input_data') + + cdef: + char *_mds_spec = opt_str(mds_spec) + char **_cmd = to_bytes_array([args]) + size_t _cmdlen = 1 + + char *_inbuf = input_data + size_t _inbuf_len = len(input_data) + + char *_outbuf = NULL + size_t _outbuf_len = 0 + char *_outs = NULL + size_t _outs_len = 0 + + try: + with nogil: + ret = ceph_mds_command(self.cluster, _mds_spec, + _cmd, _cmdlen, + _inbuf, _inbuf_len, + &_outbuf, &_outbuf_len, + &_outs, &_outs_len) + my_outs = decode_cstr(_outs[:_outs_len]) + my_outbuf = _outbuf[:_outbuf_len] + if _outs_len: + ceph_buffer_free(_outs) + if _outbuf_len: + ceph_buffer_free(_outbuf) + return (ret, my_outbuf, my_outs) + finally: + free(_cmd) + + def umask(self, mode) : + self.require_state("mounted") + cdef: + mode_t _mode = mode + with nogil: + ret = ceph_umask(self.cluster, _mode) + if ret < 0: + raise make_ex(ret, "error in umask") + return ret + + def lseek(self, fd, offset, whence): + """ + Set the file's current position. + + :param fd: the file descriptor of the open file to read from. + :param offset: the offset in the file to read from. If this value is negative, the + function reads from the current offset of the file descriptor. 
+ :param whence: the flag to indicate what type of seeking to performs:SEEK_SET, SEEK_CUR, SEEK_END + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if not isinstance(offset, int): + raise TypeError('offset must be an int') + if not isinstance(whence, int): + raise TypeError('whence must be an int') + + cdef: + int _fd = fd + int64_t _offset = offset + int64_t _whence = whence + + with nogil: + ret = ceph_lseek(self.cluster, _fd, _offset, _whence) + + if ret < 0: + raise make_ex(ret, "error in lseek") + + return ret + + def utime(self, path, times=None): + """ + Set access and modification time for path + + :param path: file path for which timestamps have to be changed + :param times: if times is not None, it must be a tuple (atime, mtime) + """ + + self.require_state("mounted") + path = cstr(path, 'path') + if times: + if not isinstance(times, tuple): + raise TypeError('times must be a tuple') + if not isinstance(times[0], int): + raise TypeError('atime must be an int') + if not isinstance(times[1], int): + raise TypeError('mtime must be an int') + actime = modtime = int(time.time()) + if times: + actime = times[0] + modtime = times[1] + + cdef: + char *pth = path + utimbuf buf = utimbuf(actime, modtime) + with nogil: + ret = ceph_utime(self.cluster, pth, &buf) + if ret < 0: + raise make_ex(ret, "error in utime {}".format(path.decode('utf-8'))) + + def futime(self, fd, times=None): + """ + Set access and modification time for a file pointed by descriptor + + :param fd: file descriptor of the open file + :param times: if times is not None, it must be a tuple (atime, mtime) + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if times: + if not isinstance(times, tuple): + raise TypeError('times must be a tuple') + if not isinstance(times[0], int): + raise TypeError('atime must be an int') + if not isinstance(times[1], int): + raise TypeError('mtime must be an int') + actime = modtime = int(time.time()) + if times: + actime = times[0] + modtime = times[1] + + cdef: + int _fd = fd + utimbuf buf = utimbuf(actime, modtime) + with nogil: + ret = ceph_futime(self.cluster, _fd, &buf) + if ret < 0: + raise make_ex(ret, "error in futime") + + def utimes(self, path, times=None, follow_symlink=True): + """ + Set access and modification time for path + + :param path: file path for which timestamps have to be changed + :param times: if times is not None, it must be a tuple (atime, mtime) + :param follow_symlink: perform the operation on the target file if @path + is a symbolic link (default) + """ + + self.require_state("mounted") + path = cstr(path, 'path') + if times: + if not isinstance(times, tuple): + raise TypeError('times must be a tuple') + if not isinstance(times[0], (int, float)): + raise TypeError('atime must be an int or a float') + if not isinstance(times[1], (int, float)): + raise TypeError('mtime must be an int or a float') + actime = modtime = time.time() + if times: + actime = float(times[0]) + modtime = float(times[1]) + + cdef: + char *pth = path + timeval *buf = [to_timeval(actime), to_timeval(modtime)] + if follow_symlink: + with nogil: + ret = ceph_utimes(self.cluster, pth, buf) + else: + with nogil: + ret = ceph_lutimes(self.cluster, pth, buf) + if ret < 0: + raise make_ex(ret, "error in utimes {}".format(path.decode('utf-8'))) + + def lutimes(self, path, times=None): + """ + Set access and modification time for a file. 
If the file is a symbolic + link do not follow to the target. + + :param path: file path for which timestamps have to be changed + :param times: if times is not None, it must be a tuple (atime, mtime) + """ + self.utimes(path, times=times, follow_symlink=False) + + def futimes(self, fd, times=None): + """ + Set access and modification time for a file pointer by descriptor + + :param fd: file descriptor of the open file + :param times: if times is not None, it must be a tuple (atime, mtime) + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if times: + if not isinstance(times, tuple): + raise TypeError('times must be a tuple') + if not isinstance(times[0], (int, float)): + raise TypeError('atime must be an int or a float') + if not isinstance(times[1], (int, float)): + raise TypeError('mtime must be an int or a float') + actime = modtime = time.time() + if times: + actime = float(times[0]) + modtime = float(times[1]) + + cdef: + int _fd = fd + timeval *buf = [to_timeval(actime), to_timeval(modtime)] + with nogil: + ret = ceph_futimes(self.cluster, _fd, buf) + if ret < 0: + raise make_ex(ret, "error in futimes") + + def futimens(self, fd, times=None): + """ + Set access and modification time for a file pointer by descriptor + + :param fd: file descriptor of the open file + :param times: if times is not None, it must be a tuple (atime, mtime) + """ + + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + if times: + if not isinstance(times, tuple): + raise TypeError('times must be a tuple') + if not isinstance(times[0], (int, float)): + raise TypeError('atime must be an int or a float') + if not isinstance(times[1], (int, float)): + raise TypeError('mtime must be an int or a float') + actime = modtime = time.time() + if times: + actime = float(times[0]) + modtime = float(times[1]) + + cdef: + int _fd = fd + timespec *buf = [to_timespec(actime), to_timespec(modtime)] + with nogil: + ret = ceph_futimens(self.cluster, _fd, buf) + if ret < 0: + raise make_ex(ret, "error in futimens") + + def get_file_replication(self, fd): + """ + Get the file replication information from an open file descriptor. + + :param fd: the open file descriptor referring to the file to get + the replication information of. + """ + self.require_state("mounted") + if not isinstance(fd, int): + raise TypeError('fd must be an int') + + cdef: + int _fd = fd + + with nogil: + ret = ceph_get_file_replication(self.cluster, _fd) + if ret < 0: + raise make_ex(ret, "error in get_file_replication") + + return ret + + def get_path_replication(self, path): + """ + Get the file replication information given the path. + + :param path: the path of the file/directory to get the replication information of. + """ + self.require_state("mounted") + path = cstr(path, 'path') + + cdef: + char* _path = path + + with nogil: + ret = ceph_get_path_replication(self.cluster, _path) + if ret < 0: + raise make_ex(ret, "error in get_path_replication") + + return ret + + def get_pool_id(self, pool_name): + """ + Get the id of the named pool. + + :param pool_name: the name of the pool. + """ + + self.require_state("mounted") + pool_name = cstr(pool_name, 'pool_name') + + cdef: + char* _pool_name = pool_name + + with nogil: + ret = ceph_get_pool_id(self.cluster, _pool_name) + if ret < 0: + raise make_ex(ret, "error in get_pool_id") + + return ret + + def get_pool_replication(self, pool_id): + """ + Get the pool replication factor. 
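+
+        For example (illustrative; the data pool name is an assumption)::
+
+            pool_id = fs.get_pool_id('cephfs_data')
+            replication = fs.get_pool_replication(pool_id)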
+
+        :param pool_id: the pool id to look up
+        """
+
+        self.require_state("mounted")
+        if not isinstance(pool_id, int):
+            raise TypeError('pool_id must be an int')
+
+        cdef:
+            int _pool_id = pool_id
+
+        with nogil:
+            ret = ceph_get_pool_replication(self.cluster, _pool_id)
+        if ret < 0:
+            raise make_ex(ret, "error in get_pool_replication")
+
+        return ret
+
+    def debug_get_fd_caps(self, fd):
+        """
+        Get the capabilities currently issued to the client given the fd.
+
+        :param fd: the file descriptor to get the issued capabilities for
+        """
+
+        self.require_state("mounted")
+        if not isinstance(fd, int):
+            raise TypeError('fd must be an int')
+
+        cdef:
+            int _fd = fd
+
+        with nogil:
+            ret = ceph_debug_get_fd_caps(self.cluster, _fd)
+        if ret < 0:
+            raise make_ex(ret, "error in debug_get_fd_caps")
+
+        return ret
+
+    def debug_get_file_caps(self, path):
+        """
+        Get the capabilities currently issued to the client given the path.
+
+        :param path: the path of the file/directory to get the capabilities of.
+        """
+
+        self.require_state("mounted")
+        path = cstr(path, 'path')
+
+        cdef:
+            char* _path = path
+
+        with nogil:
+            ret = ceph_debug_get_file_caps(self.cluster, _path)
+        if ret < 0:
+            raise make_ex(ret, "error in debug_get_file_caps")
+
+        return ret
+
+    def get_cap_return_timeout(self):
+        """
+        Get the amount of time that the client has to return caps
+
+        In the event that a client does not return its caps, the MDS may blocklist
+        it after this timeout. Applications should check this value and ensure
+        that they set the delegation timeout to a value lower than this.
+        """
+
+        self.require_state("mounted")
+
+        with nogil:
+            ret = ceph_get_cap_return_timeout(self.cluster)
+        if ret < 0:
+            raise make_ex(ret, "error in get_cap_return_timeout")
+
+        return ret
+
+    def set_uuid(self, uuid):
+        """
+        Set ceph client uuid. Must be called before mount.
+
+        :param uuid: the uuid to set
+        """
+
+        uuid = cstr(uuid, 'uuid')
+
+        cdef:
+            char* _uuid = uuid
+
+        with nogil:
+            ceph_set_uuid(self.cluster, _uuid)
+
+    def set_session_timeout(self, timeout):
+        """
+        Set ceph client session timeout. Must be called before mount.
+
+        :param timeout: the timeout to set
+        """
+
+        if not isinstance(timeout, int):
+            raise TypeError('timeout must be an int')
+
+        cdef:
+            int _timeout = timeout
+
+        with nogil:
+            ceph_set_session_timeout(self.cluster, _timeout)
+
+    def get_layout(self, fd):
+        """
+        Get the file layout from an open file descriptor.
+
+        :param fd: the open file descriptor referring to the file to get the layout of.
+        """
+
+        if not isinstance(fd, int):
+            raise TypeError('fd must be an int')
+
+        cdef:
+            int _fd = fd
+            int stripe_unit
+            int stripe_count
+            int object_size
+            int pool_id
+            char *buf = NULL
+            int buflen = 256
+            dict_result = dict()
+
+        with nogil:
+            ret = ceph_get_file_layout(self.cluster, _fd, &stripe_unit, &stripe_count, &object_size, &pool_id)
+        if ret < 0:
+            raise make_ex(ret, "error in get_file_layout")
+        dict_result["stripe_unit"] = stripe_unit
+        dict_result["stripe_count"] = stripe_count
+        dict_result["object_size"] = object_size
+        dict_result["pool_id"] = pool_id
+
+        try:
+            while True:
+                buf = realloc_chk(buf, buflen)
+                with nogil:
+                    ret = ceph_get_file_pool_name(self.cluster, _fd, buf, buflen)
+                if ret > 0:
+                    dict_result["pool_name"] = decode_cstr(buf)
+                    return dict_result
+                elif ret == -CEPHFS_ERANGE:
+                    buflen = buflen * 2
+                else:
+                    raise make_ex(ret, "error in get_file_pool_name")
+        finally:
+            free(buf)
+
+    def get_default_pool(self):
+        """
+        Get the default pool name and id of cephfs.
This returns dict{pool_name, pool_id}. + """ + + cdef: + char *buf = NULL + int buflen = 256 + dict_result = dict() + + try: + while True: + buf = realloc_chk(buf, buflen) + with nogil: + ret = ceph_get_default_data_pool_name(self.cluster, buf, buflen) + if ret > 0: + dict_result["pool_name"] = decode_cstr(buf) + break + elif ret == -CEPHFS_ERANGE: + buflen = buflen * 2 + else: + raise make_ex(ret, "error in get_default_data_pool_name") + + with nogil: + ret = ceph_get_pool_id(self.cluster, buf) + if ret < 0: + raise make_ex(ret, "error in get_pool_id") + dict_result["pool_id"] = ret + return dict_result + + finally: + free(buf) diff --git a/src/pybind/cephfs/mock_cephfs.pxi b/src/pybind/cephfs/mock_cephfs.pxi new file mode 100644 index 000000000..54b27d04c --- /dev/null +++ b/src/pybind/cephfs/mock_cephfs.pxi @@ -0,0 +1,259 @@ +# cython: embedsignature=True + +from libc.stdint cimport * +from types cimport timespec + + +cdef: + cdef struct statx "ceph_statx": + uint32_t stx_mask + uint32_t stx_blksize + uint32_t stx_nlink + uint32_t stx_uid + uint32_t stx_gid + uint16_t stx_mode + uint64_t stx_ino + uint64_t stx_size + uint64_t stx_blocks + uint64_t stx_dev + uint64_t stx_rdev + timespec stx_atime + timespec stx_ctime + timespec stx_mtime + timespec stx_btime + uint64_t stx_version + +cdef nogil: + cdef struct ceph_mount_info: + int dummy + + cdef struct ceph_dir_result: + int dummy + + cdef struct snap_metadata: + const char *key + const char *value + + cdef struct snap_info: + uint64_t id + size_t nr_snap_metadata + snap_metadata *snap_metadata + + cdef struct ceph_snapdiff_info: + int dummy + + cdef struct ceph_snapdiff_entry_t: + int dummy + + ctypedef void* rados_t + + const char *ceph_version(int *major, int *minor, int *patch): + pass + + int ceph_create(ceph_mount_info **cmount, const char * const id): + pass + int ceph_create_from_rados(ceph_mount_info **cmount, rados_t cluster): + pass + int ceph_init(ceph_mount_info *cmount): + pass + void ceph_shutdown(ceph_mount_info *cmount): + pass + + int ceph_getaddrs(ceph_mount_info* cmount, char** addrs): + pass + int64_t ceph_get_fs_cid(ceph_mount_info *cmount): + pass + int ceph_conf_read_file(ceph_mount_info *cmount, const char *path_list): + pass + int ceph_conf_parse_argv(ceph_mount_info *cmount, int argc, const char **argv): + pass + int ceph_conf_get(ceph_mount_info *cmount, const char *option, char *buf, size_t len): + pass + int ceph_conf_set(ceph_mount_info *cmount, const char *option, const char *value): + pass + int ceph_set_mount_timeout(ceph_mount_info *cmount, uint32_t timeout): + pass + + int ceph_mount(ceph_mount_info *cmount, const char *root): + pass + int ceph_select_filesystem(ceph_mount_info *cmount, const char *fs_name): + pass + int ceph_unmount(ceph_mount_info *cmount): + pass + int ceph_abort_conn(ceph_mount_info *cmount): + pass + uint64_t ceph_get_instance_id(ceph_mount_info *cmount): + pass + int ceph_fstatx(ceph_mount_info *cmount, int fd, statx *stx, unsigned want, unsigned flags): + pass + int ceph_statx(ceph_mount_info *cmount, const char *path, statx *stx, unsigned want, unsigned flags): + pass + int ceph_statfs(ceph_mount_info *cmount, const char *path, statvfs *stbuf): + pass + + int ceph_setattrx(ceph_mount_info *cmount, const char *relpath, statx *stx, int mask, int flags): + pass + int ceph_fsetattrx(ceph_mount_info *cmount, int fd, statx *stx, int mask): + pass + int ceph_mds_command(ceph_mount_info *cmount, const char *mds_spec, const char **cmd, size_t cmdlen, + const char *inbuf, size_t 
inbuflen, char **outbuf, size_t *outbuflen, + char **outs, size_t *outslen): + pass + int ceph_rename(ceph_mount_info *cmount, const char *from_, const char *to): + pass + int ceph_link(ceph_mount_info *cmount, const char *existing, const char *newname): + pass + int ceph_unlink(ceph_mount_info *cmount, const char *path): + pass + int ceph_symlink(ceph_mount_info *cmount, const char *existing, const char *newname): + pass + int ceph_readlink(ceph_mount_info *cmount, const char *path, char *buf, int64_t size): + pass + int ceph_setxattr(ceph_mount_info *cmount, const char *path, const char *name, + const void *value, size_t size, int flags): + pass + int ceph_fsetxattr(ceph_mount_info *cmount, int fd, const char *name, + const void *value, size_t size, int flags): + pass + int ceph_lsetxattr(ceph_mount_info *cmount, const char *path, const char *name, + const void *value, size_t size, int flags): + pass + int ceph_getxattr(ceph_mount_info *cmount, const char *path, const char *name, + void *value, size_t size): + pass + int ceph_fgetxattr(ceph_mount_info *cmount, int fd, const char *name, + void *value, size_t size): + pass + int ceph_lgetxattr(ceph_mount_info *cmount, const char *path, const char *name, + void *value, size_t size): + pass + int ceph_removexattr(ceph_mount_info *cmount, const char *path, const char *name): + pass + int ceph_fremovexattr(ceph_mount_info *cmount, int fd, const char *name): + pass + int ceph_lremovexattr(ceph_mount_info *cmount, const char *path, const char *name): + pass + int ceph_listxattr(ceph_mount_info *cmount, const char *path, char *list, size_t size): + pass + int ceph_flistxattr(ceph_mount_info *cmount, int fd, char *list, size_t size): + pass + int ceph_llistxattr(ceph_mount_info *cmount, const char *path, char *list, size_t size): + pass + int ceph_write(ceph_mount_info *cmount, int fd, const char *buf, int64_t size, int64_t offset): + pass + int ceph_pwritev(ceph_mount_info *cmount, int fd, iovec *iov, int iovcnt, int64_t offset): + pass + int ceph_read(ceph_mount_info *cmount, int fd, char *buf, int64_t size, int64_t offset): + pass + int ceph_preadv(ceph_mount_info *cmount, int fd, iovec *iov, int iovcnt, int64_t offset): + pass + int ceph_flock(ceph_mount_info *cmount, int fd, int operation, uint64_t owner): + pass + int ceph_mknod(ceph_mount_info *cmount, const char *path, mode_t mode, dev_t rdev): + pass + int ceph_close(ceph_mount_info *cmount, int fd): + pass + int ceph_open(ceph_mount_info *cmount, const char *path, int flags, mode_t mode): + pass + int ceph_mkdir(ceph_mount_info *cmount, const char *path, mode_t mode): + pass + int ceph_mksnap(ceph_mount_info *cmount, const char *path, const char *name, mode_t mode, snap_metadata *snap_metadata, size_t nr_snap_metadata): + pass + int ceph_rmsnap(ceph_mount_info *cmount, const char *path, const char *name): + pass + int ceph_get_snap_info(ceph_mount_info *cmount, const char *path, snap_info *snap_info): + pass + void ceph_free_snap_info_buffer(snap_info *snap_info): + pass + int ceph_mkdirs(ceph_mount_info *cmount, const char *path, mode_t mode): + pass + int ceph_closedir(ceph_mount_info *cmount, ceph_dir_result *dirp): + pass + int ceph_opendir(ceph_mount_info *cmount, const char *name, ceph_dir_result **dirpp): + pass + void ceph_rewinddir(ceph_mount_info *cmount, ceph_dir_result *dirp): + pass + int64_t ceph_telldir(ceph_mount_info *cmount, ceph_dir_result *dirp): + pass + void ceph_seekdir(ceph_mount_info *cmount, ceph_dir_result *dirp, int64_t offset): + pass + int 
ceph_chdir(ceph_mount_info *cmount, const char *path): + pass + dirent * ceph_readdir(ceph_mount_info *cmount, ceph_dir_result *dirp): + pass + int ceph_open_snapdiff(ceph_mount_info *cmount, const char *root_path, const char *rel_path, const char *snap1path, const char *snap2root, ceph_snapdiff_info *out): + pass + int ceph_readdir_snapdiff(ceph_snapdiff_info *snapdiff, ceph_snapdiff_entry_t *out): + pass + int ceph_close_snapdiff(ceph_snapdiff_info *snapdiff): + pass + int ceph_rmdir(ceph_mount_info *cmount, const char *path): + pass + const char* ceph_getcwd(ceph_mount_info *cmount): + pass + int ceph_sync_fs(ceph_mount_info *cmount): + pass + int ceph_fsync(ceph_mount_info *cmount, int fd, int syncdataonly): + pass + int ceph_lazyio(ceph_mount_info *cmount, int fd, int enable): + pass + int ceph_lazyio_propagate(ceph_mount_info *cmount, int fd, int64_t offset, size_t count): + pass + int ceph_lazyio_synchronize(ceph_mount_info *cmount, int fd, int64_t offset, size_t count): + pass + int ceph_fallocate(ceph_mount_info *cmount, int fd, int mode, int64_t offset, int64_t length): + pass + int ceph_chmod(ceph_mount_info *cmount, const char *path, mode_t mode): + pass + int ceph_lchmod(ceph_mount_info *cmount, const char *path, mode_t mode): + pass + int ceph_fchmod(ceph_mount_info *cmount, int fd, mode_t mode): + pass + int ceph_chown(ceph_mount_info *cmount, const char *path, int uid, int gid): + pass + int ceph_lchown(ceph_mount_info *cmount, const char *path, int uid, int gid): + pass + int ceph_fchown(ceph_mount_info *cmount, int fd, int uid, int gid): + pass + int64_t ceph_lseek(ceph_mount_info *cmount, int fd, int64_t offset, int whence): + pass + void ceph_buffer_free(char *buf): + pass + mode_t ceph_umask(ceph_mount_info *cmount, mode_t mode): + pass + int ceph_utime(ceph_mount_info *cmount, const char *path, utimbuf *buf): + pass + int ceph_futime(ceph_mount_info *cmount, int fd, utimbuf *buf): + pass + int ceph_utimes(ceph_mount_info *cmount, const char *path, timeval times[2]): + pass + int ceph_lutimes(ceph_mount_info *cmount, const char *path, timeval times[2]): + pass + int ceph_futimes(ceph_mount_info *cmount, int fd, timeval times[2]): + pass + int ceph_futimens(ceph_mount_info *cmount, int fd, timespec times[2]): + pass + int ceph_get_file_replication(ceph_mount_info *cmount, int fh): + pass + int ceph_get_path_replication(ceph_mount_info *cmount, const char *path): + pass + int ceph_get_pool_id(ceph_mount_info *cmount, const char *pool_name): + pass + int ceph_get_pool_replication(ceph_mount_info *cmount, int pool_id): + pass + int ceph_debug_get_fd_caps(ceph_mount_info *cmount, int fd): + pass + int ceph_debug_get_file_caps(ceph_mount_info *cmount, const char *path): + pass + uint32_t ceph_get_cap_return_timeout(ceph_mount_info *cmount): + pass + void ceph_set_uuid(ceph_mount_info *cmount, const char *uuid): + pass + void ceph_set_session_timeout(ceph_mount_info *cmount, unsigned timeout): + pass + int ceph_get_file_layout(ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool): + pass + int ceph_get_file_pool_name(ceph_mount_info *cmount, int fh, char *buf, size_t buflen): + pass + int ceph_get_default_data_pool_name(ceph_mount_info *cmount, char *buf, size_t buflen): + pass diff --git a/src/pybind/cephfs/setup.py b/src/pybind/cephfs/setup.py new file mode 100755 index 000000000..f6c2025f7 --- /dev/null +++ b/src/pybind/cephfs/setup.py @@ -0,0 +1,215 @@ +import os +import pkgutil +import shutil +import subprocess +import sys 
+import tempfile
+import textwrap
+if not pkgutil.find_loader('setuptools'):
+    from distutils.core import setup
+    from distutils.extension import Extension
+else:
+    from setuptools import setup
+    from setuptools.extension import Extension
+from distutils.ccompiler import new_compiler
+from distutils.errors import CompileError, LinkError
+from itertools import filterfalse, takewhile
+import distutils.sysconfig
+
+
+def filter_unsupported_flags(compiler, flags):
+    args = takewhile(lambda argv: not argv.startswith('-'), [compiler] + flags)
+    if any('clang' in arg for arg in args):
+        return list(filterfalse(lambda f:
+                                f in ('-mcet',
+                                      '-fstack-clash-protection',
+                                      '-fno-var-tracking-assignments',
+                                      '-Wno-deprecated-register',
+                                      '-Wno-gnu-designator') or
+                                f.startswith('-fcf-protection'),
+                                flags))
+    else:
+        return flags
+
+
+def monkey_with_compiler(customize):
+    def patched(compiler):
+        customize(compiler)
+        if compiler.compiler_type != 'unix':
+            return
+        compiler.compiler[1:] = \
+            filter_unsupported_flags(compiler.compiler[0],
+                                     compiler.compiler[1:])
+        compiler.compiler_so[1:] = \
+            filter_unsupported_flags(compiler.compiler_so[0],
+                                     compiler.compiler_so[1:])
+    return patched
+
+
+distutils.sysconfig.customize_compiler = \
+    monkey_with_compiler(distutils.sysconfig.customize_compiler)
+
+# PEP 440 versioning of the Ceph FS package on PyPI
+# Bump this version, after every changeset
+
+__version__ = '2.0.0'
+
+
+def get_python_flags(libs):
+    py_libs = sum((libs.split() for libs in
+                   distutils.sysconfig.get_config_vars('LIBS', 'SYSLIBS')), [])
+    ldflags = list(filterfalse(lambda lib: lib.startswith('-l'), py_libs))
+    py_libs = [lib.replace('-l', '') for lib in
+               filter(lambda lib: lib.startswith('-l'), py_libs)]
+    compiler = new_compiler()
+    distutils.sysconfig.customize_compiler(compiler)
+    return dict(
+        include_dirs=[distutils.sysconfig.get_python_inc()],
+        library_dirs=distutils.sysconfig.get_config_vars('LIBDIR', 'LIBPL'),
+        libraries=libs + py_libs,
+        extra_compile_args=filter_unsupported_flags(
+            compiler.compiler[0],
+            compiler.compiler[1:] + distutils.sysconfig.get_config_var('CFLAGS').split()),
+        extra_link_args=(distutils.sysconfig.get_config_var('LDFLAGS').split() +
+                         ldflags))
+
+
+def check_sanity():
+    """
+    Test if development headers and library for cephfs are available by compiling a dummy C program.
+    """
+    CEPH_SRC_DIR = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        '..',
+        '..'
+    )
+
+    tmp_dir = tempfile.mkdtemp(dir=os.environ.get('TMPDIR', os.path.dirname(__file__)))
+    tmp_file = os.path.join(tmp_dir, 'cephfs_dummy.c')
+
+    with open(tmp_file, 'w') as fp:
+        dummy_prog = textwrap.dedent("""
+        #include <stdio.h>
+        #include "cephfs/libcephfs.h"
+
+        int main(void) {
+            struct ceph_mount_info *cmount = NULL;
+            ceph_init(cmount);
+            return 0;
+        }
+        """)
+        fp.write(dummy_prog)
+
+    compiler = new_compiler()
+    distutils.sysconfig.customize_compiler(compiler)
+
+    if 'CEPH_LIBDIR' in os.environ:
+        # The setup.py has been invoked by a top-level Ceph make.
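+        # CEPH_LIBDIR is exported by Ceph's build system and points at the
+        # directory that contains the freshly built libcephfs.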
+ # Set the appropriate CFLAGS and LDFLAGS + compiler.set_library_dirs([os.environ.get('CEPH_LIBDIR')]) + + try: + compiler.define_macro('_FILE_OFFSET_BITS', '64') + + link_objects = compiler.compile( + sources=[tmp_file], + output_dir=tmp_dir, + extra_preargs=['-iquote{path}'.format(path=os.path.join(CEPH_SRC_DIR, 'include'))] + ) + + compiler.link_executable( + objects=link_objects, + output_progname=os.path.join(tmp_dir, 'cephfs_dummy'), + libraries=['cephfs'], + output_dir=tmp_dir, + ) + + except CompileError: + print('\nCompile Error: Ceph FS development headers not found', file=sys.stderr) + return False + except LinkError: + print('\nLink Error: Ceph FS library not found', file=sys.stderr) + return False + else: + return True + finally: + shutil.rmtree(tmp_dir) + + +if 'BUILD_DOC' in os.environ or 'READTHEDOCS' in os.environ: + ext_args = {} + cython_constants = dict(BUILD_DOC=True) + cythonize_args = dict(compile_time_env=cython_constants) +elif check_sanity(): + ext_args = get_python_flags(['cephfs']) + cython_constants = dict(BUILD_DOC=False) + include_path = [os.path.join(os.path.dirname(__file__), "..", "rados")] + cythonize_args = dict(compile_time_env=cython_constants, + include_path=include_path) +else: + sys.exit(1) + +cmdclass = {} +try: + from Cython.Build import cythonize + from Cython.Distutils import build_ext + + cmdclass = {'build_ext': build_ext} +except ImportError: + print("WARNING: Cython is not installed.") + + if not os.path.isfile('cephfs.c'): + print('ERROR: Cannot find Cythonized file cephfs.c', file=sys.stderr) + sys.exit(1) + else: + def cythonize(x, **kwargs): + return x + + source = "cephfs.c" +else: + source = "cephfs.pyx" + +# Disable cythonification if we're not really building anything +if (len(sys.argv) >= 2 and + any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version') + )): + def cythonize(x, **kwargs): + return x + +setup( + name='cephfs', + version=__version__, + description="Python bindings for the Ceph FS library", + long_description=( + "This package contains Python bindings for interacting with the " + "Ceph Filesystem (Ceph FS) library. Ceph FS is a POSIX-compliant " + "filesystem that uses a Ceph Storage Cluster to store its data. The " + "Ceph filesystem uses the same Ceph Storage Cluster system as " + "Ceph Block Devices, Ceph Object Storage with its S3 and Swift APIs, " + "or native bindings (librados)." 
+ ), + url='https://github.com/ceph/ceph/tree/master/src/pybind/cephfs', + license='LGPLv2+', + platforms='Linux', + ext_modules=cythonize( + [ + Extension( + "cephfs", + [source], + **ext_args + ) + ], + compiler_directives={'language_level': sys.version_info.major}, + build_dir=os.environ.get("CYTHON_BUILD_DIR", None), + **cythonize_args + ), + classifiers=[ + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)', + 'Operating System :: POSIX :: Linux', + 'Programming Language :: Cython', + 'Programming Language :: Python :: 3', + ], + cmdclass=cmdclass, +) diff --git a/src/pybind/cephfs/types.pxd b/src/pybind/cephfs/types.pxd new file mode 100644 index 000000000..d20ea87dc --- /dev/null +++ b/src/pybind/cephfs/types.pxd @@ -0,0 +1,55 @@ +cdef extern from "time.h": + ctypedef long int time_t + cdef struct timespec: + time_t tv_sec + long int tv_nsec + +cdef extern from "<utime.h>": + cdef struct utimbuf: + time_t actime + time_t modtime + +cdef extern from "sys/types.h": + ctypedef unsigned long mode_t + ctypedef unsigned long dev_t + +cdef extern from "sys/time.h": + cdef struct timeval: + long tv_sec + long tv_usec + +cdef extern from "sys/statvfs.h": + cdef struct statvfs: + unsigned long int f_bsize + unsigned long int f_frsize + unsigned long int f_blocks + unsigned long int f_bfree + unsigned long int f_bavail + unsigned long int f_files + unsigned long int f_ffree + unsigned long int f_favail + unsigned long int f_fsid + unsigned long int f_flag + unsigned long int f_namemax + unsigned long int f_padding[32] + +cdef extern from "<sys/uio.h>": + cdef struct iovec: + void *iov_base + size_t iov_len + +IF UNAME_SYSNAME == "FreeBSD" or UNAME_SYSNAME == "Darwin": + cdef extern from "dirent.h": + cdef struct dirent: + long int d_ino + unsigned short int d_reclen + unsigned char d_type + char d_name[256] +ELSE: + cdef extern from "dirent.h": + cdef struct dirent: + long int d_ino + unsigned long int d_off + unsigned short int d_reclen + unsigned char d_type + char d_name[256] diff --git a/src/pybind/mgr/.gitignore b/src/pybind/mgr/.gitignore new file mode 100644 index 000000000..642616e09 --- /dev/null +++ b/src/pybind/mgr/.gitignore @@ -0,0 +1,17 @@ +proxy.conf.json + +# tox related +.coverage* +htmlcov +.tox +coverage.xml +junit*xml +.cache + +# IDE +.vscode +*.egg +.env + +# virtualenv +venv diff --git a/src/pybind/mgr/.pylintrc b/src/pybind/mgr/.pylintrc new file mode 100644 index 000000000..8cab074d9 --- /dev/null +++ b/src/pybind/mgr/.pylintrc @@ -0,0 +1,593 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loaded into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10.0 + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object.
This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=print-statement, + parameter-unpacking, + unpacking-in-except, + old-raise-syntax, + backtick, + long-suffix, + old-ne-operator, + old-octal-literal, + import-star-module-level, + non-ascii-bytes-literal, + raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + indexing-exception, + raising-string, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, + exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating, + deprecated-operator-function, + deprecated-urllib-function, + xreadlines-attribute, + deprecated-sys-function, + exception-escape, + comprehension-escape + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. 
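The disable/enable lists above apply module-wide; the same message control can also be scoped to individual lines or blocks with standard pylint pragma comments, for example:

class Dynamic:
    def __getattr__(self, name):
        return 42

value = Dynamic().attr  # pylint: disable=no-member

# pylint: disable=invalid-name
BADLY_cased_name = value  # not reported while the pragma is active
# pylint: enable=invalid-name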
+enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. 
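A worked instance of the [REPORTS] evaluation expression above, with hypothetical message counts (one error, two warnings, one hundred statements analyzed):

error, warning, refactor, convention, statement = 1, 2, 0, 0, 100
score = 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
print(score)  # 9.3 (up to float rounding) -- errors weigh five times as much as other messages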
+method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: en_AG (hunspell), en_AU +# (hunspell), en_BS (hunspell), en_BW (hunspell), en_BZ (hunspell), en_CA +# (hunspell), en_DK (hunspell), en_GB (hunspell), en_GH (hunspell), en_HK +# (hunspell), en_IE (hunspell), en_IN (hunspell), en_JM (hunspell), en_MW +# (hunspell), en_NA (hunspell), en_NG (hunspell), en_NZ (hunspell), en_PH +# (hunspell), en_SG (hunspell), en_TT (hunspell), en_US (hunspell), en_ZA +# (hunspell), en_ZM (hunspell), en_ZW (hunspell). +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line.
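The URL pattern restored in ignore-long-lines above exempts lines consisting solely of a link from the length limit; a standalone check of the same regex:

import re
pat = re.compile(r'^\s*(# )?<?https?://\S+>?$')
print(bool(pat.match('# https://tracker.ceph.com/issues/12345')))  # True: exempt
print(bool(pat.match('url = fetch("https://example.com") and more')))  # False: still length-checked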
+spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". 
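Names matching dummy-variables-rgx and ignored-argument-names above are exempt from pylint's unused checks; a small hypothetical illustration:

def on_event(event, _context, unused_verbose=False):
    # _context and unused_verbose match ignored-argument-names, so pylint
    # does not flag them even though the body never uses them.
    _ = None  # '_' matches dummy-variables-rgx; a name like 'tmp' would be flagged
    return event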
+overgeneral-exceptions=BaseException, + Exception diff --git a/src/pybind/mgr/CMakeLists.txt b/src/pybind/mgr/CMakeLists.txt new file mode 100644 index 000000000..e8c06c9e2 --- /dev/null +++ b/src/pybind/mgr/CMakeLists.txt @@ -0,0 +1,63 @@ +set(mgr_module_install_excludes + PATTERN "CMakeLists.txt" EXCLUDE + PATTERN ".gitignore" EXCLUDE + PATTERN "tox.ini" EXCLUDE + PATTERN "requirements*.txt" EXCLUDE + PATTERN "constraints*.txt" EXCLUDE + PATTERN "tests/*" EXCLUDE) + +add_subdirectory(dashboard) + +if(WITH_MGR_ROOK_CLIENT) + add_subdirectory(rook) +endif() +if(WITH_TESTS) + include(AddCephTest) + add_tox_test(mgr ${CMAKE_CURRENT_SOURCE_DIR} TOX_ENVS py3 py37 mypy flake8 jinjalint nooptional) +endif() + +# Location needs to match default setting for mgr_module_path, currently: +# OPTION(mgr_module_path, OPT_STR, CEPH_INSTALL_DATADIR "/mgr") +set(mgr_modules + alerts + balancer + cephadm + crash + # dashboard (optional) + devicehealth + diskprediction_local + # hello is an example for developers, not for user + influx + insights + iostat + k8sevents + localpool + mds_autoscaler + mirroring + nfs + orchestrator + osd_perf_query + osd_support + pg_autoscaler + progress + prometheus + rbd_support + restful + rgw + # rook (optional) + selftest + snap_schedule + stats + status + telegraf + telemetry + # tests (for testing purpose only) + test_orchestrator + volumes + zabbix) + +install(DIRECTORY ${mgr_modules} + DESTINATION ${CEPH_INSTALL_DATADIR}/mgr + ${mgr_module_install_excludes}) +install(FILES mgr_module.py mgr_util.py object_format.py + DESTINATION ${CEPH_INSTALL_DATADIR}/mgr) diff --git a/src/pybind/mgr/alerts/__init__.py b/src/pybind/mgr/alerts/__init__.py new file mode 100644 index 000000000..f2f1d781b --- /dev/null +++ b/src/pybind/mgr/alerts/__init__.py @@ -0,0 +1,2 @@ +# flake8: noqa +from .module import Alerts diff --git a/src/pybind/mgr/alerts/module.py b/src/pybind/mgr/alerts/module.py new file mode 100644 index 000000000..f20f04716 --- /dev/null +++ b/src/pybind/mgr/alerts/module.py @@ -0,0 +1,258 @@ + +""" +A simple cluster health alerting module. +""" + +from mgr_module import CLIReadCommand, HandleCommandResult, MgrModule, Option +from email.utils import formatdate, make_msgid +from threading import Event +from typing import Any, Optional, Dict, List, TYPE_CHECKING, Union +import json +import smtplib + + +class Alerts(MgrModule): + MODULE_OPTIONS = [ + Option( + name='interval', + type='secs', + default=60, + desc='How frequently to reexamine health status', + runtime=True), + # smtp + Option( + name='smtp_host', + default='', + desc='SMTP server', + runtime=True), + Option( + name='smtp_destination', + default='', + desc='Email address to send alerts to', + runtime=True), + Option( + name='smtp_port', + type='int', + default=465, + desc='SMTP port', + runtime=True), + Option( + name='smtp_ssl', + type='bool', + default=True, + desc='Use SSL to connect to SMTP server', + runtime=True), + Option( + name='smtp_user', + default='', + desc='User to authenticate as', + runtime=True), + Option( + name='smtp_password', + default='', + desc='Password to authenticate with', + runtime=True), + Option( + name='smtp_sender', + default='', + desc='SMTP envelope sender', + runtime=True), + Option( + name='smtp_from_name', + default='Ceph', + desc='Email From: name', + runtime=True) + ] + + # These are "native" Ceph options that this module cares about. 
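Before the class body continues, a self-contained sketch of what the config_notify() boilerplate below accomplishes; Demo and its option values are hypothetical stand-ins for the MgrModule machinery:

class Demo:
    MODULE_OPTIONS = [{'name': 'interval'}, {'name': 'smtp_host'}]

    def get_module_option(self, name):
        # stand-in for MgrModule.get_module_option()
        return {'interval': 60, 'smtp_host': 'mail.example.com'}[name]

    def config_notify(self):
        # mirror every declared option onto an instance attribute
        for opt in self.MODULE_OPTIONS:
            setattr(self, opt['name'], self.get_module_option(opt['name']))

d = Demo()
d.config_notify()
print(d.interval, d.smtp_host)  # 60 mail.example.com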
+ NATIVE_OPTIONS: List[str] = [ + ] + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super(Alerts, self).__init__(*args, **kwargs) + + # set up some members to enable the serve() method and shutdown() + self.run = True + self.event = Event() + + # ensure config options members are initialized; see config_notify() + self.config_notify() + + self.log.info("Init") + + if TYPE_CHECKING: + self.interval = 60 + self.smtp_host = '' + self.smtp_destination = '' + self.smtp_port = 0 + self.smtp_ssl = True + self.smtp_user = '' + self.smtp_password = '' + self.smtp_sender = '' + self.smtp_from_name = '' + + def config_notify(self) -> None: + """ + This method is called whenever one of our config options is changed. + """ + # This is some boilerplate that mirrors each entry in MODULE_OPTIONS + # onto an instance attribute, so that, for instance, the 'interval' + # option is always available as 'self.interval'. + for opt in self.MODULE_OPTIONS: + setattr(self, + opt['name'], + self.get_module_option(opt['name'])) + self.log.debug(' mgr option %s = %s', + opt['name'], getattr(self, opt['name'])) + # Do the same for the native options. + for opt in self.NATIVE_OPTIONS: + setattr(self, + opt, + self.get_ceph_option(opt)) + self.log.debug(' native option %s = %s', opt, getattr(self, opt)) + + @CLIReadCommand('alerts send') + def send(self) -> HandleCommandResult: + """ + (re)send alerts immediately + """ + status = json.loads(self.get('health')['json']) + self._send_alert(status, {}) + return HandleCommandResult() + + def _diff(self, last: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]: + d: Dict[str, Any] = {} + for code, alert in new.get('checks', {}).items(): + self.log.debug('new code %s alert %s' % (code, alert)) + if code not in last.get('checks', {}): + if 'new' not in d: + d['new'] = {} + d['new'][code] = alert + elif (alert['summary'].get('count', 0) + > last['checks'][code]['summary'].get('count', 0)): + if 'updated' not in d: + d['updated'] = {} + d['updated'][code] = alert + for code, alert in last.get('checks', {}).items(): + self.log.debug('old code %s alert %s' % (code, alert)) + if code not in new.get('checks', {}): + if 'cleared' not in d: + d['cleared'] = {} + d['cleared'][code] = alert + return d + + def _send_alert(self, status: Dict[str, Any], diff: Dict[str, Any]) -> None: + checks = {} + if self.smtp_host: + r = self._send_alert_smtp(status, diff) + if r: + for code, alert in r.items(): + checks[code] = alert + else: + self.log.warning('Alert is not sent because smtp_host is not configured') + self.set_health_checks(checks) + + def serve(self) -> None: + """ + This method is called by the mgr when the module starts and can be + used for any background activity. + """ + self.log.info("Starting") + last_status: Dict[str, Any] = {} + while self.run: + # Do some useful background work here. + new_status = json.loads(self.get('health')['json']) + if new_status != last_status: + self.log.debug('last_status %s' % last_status) + self.log.debug('new_status %s' % new_status) + diff = self._diff(last_status, + new_status) + self.log.debug('diff %s' % diff) + if diff: + self._send_alert(new_status, diff) + last_status = new_status + + self.log.debug('Sleeping for %s seconds', self.interval) + self.event.wait(self.interval or 60) + self.event.clear() + + def shutdown(self) -> None: + """ + This method is called by the mgr when the module needs to shut + down (i.e., when the serve() function needs to exit).
+ """ + self.log.info('Stopping') + self.run = False + self.event.set() + + # SMTP + def _smtp_format_alert(self, code: str, alert: Dict[str, Any]) -> str: + r = '[{sev}] {code}: {summary}\n'.format( + code=code, + sev=alert['severity'].split('_')[1], + summary=alert['summary']['message']) + for detail in alert['detail']: + r += ' {message}\n'.format( + message=detail['message']) + return r + + def _send_alert_smtp(self, + status: Dict[str, Any], + diff: Dict[str, Any]) -> Optional[Dict[str, Any]]: + # message + self.log.debug('_send_alert_smtp') + message = ('From: {from_name} <{sender}>\n' + 'Subject: {status}\n' + 'To: {target}\n' + 'Message-Id: {message_id}\n' + 'Date: {date}\n' + '\n' + '{status}\n'.format( + sender=self.smtp_sender, + from_name=self.smtp_from_name, + status=status['status'], + target=self.smtp_destination, + message_id=make_msgid(), + date=formatdate())) + + if 'new' in diff: + message += ('\n--- New ---\n') + for code, alert in diff['new'].items(): + message += self._smtp_format_alert(code, alert) + if 'updated' in diff: + message += ('\n--- Updated ---\n') + for code, alert in diff['updated'].items(): + message += self._smtp_format_alert(code, alert) + if 'cleared' in diff: + message += ('\n--- Cleared ---\n') + for code, alert in diff['cleared'].items(): + message += self._smtp_format_alert(code, alert) + + message += ('\n\n=== Full health status ===\n') + for code, alert in status['checks'].items(): + message += self._smtp_format_alert(code, alert) + + self.log.debug('message: %s' % message) + + # send + try: + if self.smtp_ssl: + server: Union[smtplib.SMTP_SSL, smtplib.SMTP] = \ + smtplib.SMTP_SSL(self.smtp_host, self.smtp_port) + else: + server = smtplib.SMTP(self.smtp_host, self.smtp_port) + if self.smtp_password: + server.login(self.smtp_user, self.smtp_password) + server.sendmail(self.smtp_sender, self.smtp_destination, message) + server.quit() + except Exception as e: + return { + 'ALERTS_SMTP_ERROR': { + 'severity': 'warning', + 'summary': 'unable to send alert email', + 'count': 1, + 'detail': [str(e)] + } + } + self.log.debug('Sent email to %s' % self.smtp_destination) + return None diff --git a/src/pybind/mgr/balancer/__init__.py b/src/pybind/mgr/balancer/__init__.py new file mode 100644 index 000000000..ee85dc9d3 --- /dev/null +++ b/src/pybind/mgr/balancer/__init__.py @@ -0,0 +1,2 @@ +# flake8: noqa +from .module import Module diff --git a/src/pybind/mgr/balancer/module.py b/src/pybind/mgr/balancer/module.py new file mode 100644 index 000000000..1c4042511 --- /dev/null +++ b/src/pybind/mgr/balancer/module.py @@ -0,0 +1,1409 @@ +""" +Balance PG distribution across OSDs. 
+""" + +import copy +import enum +import errno +import json +import math +import random +import time +from mgr_module import CLIReadCommand, CLICommand, CommandResult, MgrModule, Option, OSDMap +from threading import Event +from typing import cast, Any, Dict, List, Optional, Sequence, Tuple, Union +from mgr_module import CRUSHMap +import datetime + +TIME_FORMAT = '%Y-%m-%d_%H:%M:%S' + + +class MappingState: + def __init__(self, osdmap, raw_pg_stats, raw_pool_stats, desc=''): + self.desc = desc + self.osdmap = osdmap + self.osdmap_dump = self.osdmap.dump() + self.crush = osdmap.get_crush() + self.crush_dump = self.crush.dump() + self.raw_pg_stats = raw_pg_stats + self.raw_pool_stats = raw_pool_stats + self.pg_stat = { + i['pgid']: i['stat_sum'] for i in raw_pg_stats.get('pg_stats', []) + } + osd_poolids = [p['pool'] for p in self.osdmap_dump.get('pools', [])] + pg_poolids = [p['poolid'] for p in raw_pool_stats.get('pool_stats', [])] + self.poolids = set(osd_poolids) & set(pg_poolids) + self.pg_up = {} + self.pg_up_by_poolid = {} + for poolid in self.poolids: + self.pg_up_by_poolid[poolid] = osdmap.map_pool_pgs_up(poolid) + for a, b in self.pg_up_by_poolid[poolid].items(): + self.pg_up[a] = b + + def calc_misplaced_from(self, other_ms): + num = len(other_ms.pg_up) + misplaced = 0 + for pgid, before in other_ms.pg_up.items(): + if before != self.pg_up.get(pgid, []): + misplaced += 1 + if num > 0: + return float(misplaced) / float(num) + return 0.0 + + +class Mode(enum.Enum): + none = 'none' + crush_compat = 'crush-compat' + upmap = 'upmap' + + +class Plan(object): + def __init__(self, name, mode, osdmap, pools): + self.name = name + self.mode = mode + self.osdmap = osdmap + self.osdmap_dump = osdmap.dump() + self.pools = pools + self.osd_weights = {} + self.compat_ws = {} + self.inc = osdmap.new_incremental() + self.pg_status = {} + + def dump(self) -> str: + return json.dumps(self.inc.dump(), indent=4, sort_keys=True) + + def show(self) -> str: + return 'upmap plan' + + +class MsPlan(Plan): + """ + Plan with a preloaded MappingState member. 
+ """ + + def __init__(self, name: str, mode: str, ms: MappingState, pools: List[str]) -> None: + super(MsPlan, self).__init__(name, mode, ms.osdmap, pools) + self.initial = ms + + def final_state(self) -> MappingState: + self.inc.set_osd_reweights(self.osd_weights) + self.inc.set_crush_compat_weight_set_weights(self.compat_ws) + return MappingState(self.initial.osdmap.apply_incremental(self.inc), + self.initial.raw_pg_stats, + self.initial.raw_pool_stats, + 'plan %s final' % self.name) + + def show(self) -> str: + ls = [] + ls.append('# starting osdmap epoch %d' % self.initial.osdmap.get_epoch()) + ls.append('# starting crush version %d' % + self.initial.osdmap.get_crush_version()) + ls.append('# mode %s' % self.mode) + if len(self.compat_ws) and \ + not CRUSHMap.have_default_choose_args(self.initial.crush_dump): + ls.append('ceph osd crush weight-set create-compat') + for osd, weight in self.compat_ws.items(): + ls.append('ceph osd crush weight-set reweight-compat %s %f' % + (osd, weight)) + for osd, weight in self.osd_weights.items(): + ls.append('ceph osd reweight osd.%d %f' % (osd, weight)) + incdump = self.inc.dump() + for pgid in incdump.get('old_pg_upmap_items', []): + ls.append('ceph osd rm-pg-upmap-items %s' % pgid) + for item in incdump.get('new_pg_upmap_items', []): + osdlist = [] + for m in item['mappings']: + osdlist += [m['from'], m['to']] + ls.append('ceph osd pg-upmap-items %s %s' % + (item['pgid'], ' '.join([str(a) for a in osdlist]))) + return '\n'.join(ls) + + +class Eval: + def __init__(self, ms: MappingState): + self.ms = ms + self.root_ids: Dict[str, int] = {} # root name -> id + self.pool_name: Dict[str, str] = {} # pool id -> pool name + self.pool_id: Dict[str, int] = {} # pool name -> id + self.pool_roots: Dict[str, List[str]] = {} # pool name -> root name + self.root_pools: Dict[str, List[str]] = {} # root name -> pools + self.target_by_root: Dict[str, Dict[int, float]] = {} # root name -> target weight map + self.count_by_pool: Dict[str, dict] = {} + self.count_by_root: Dict[str, dict] = {} + self.actual_by_pool: Dict[str, dict] = {} # pool -> by_* -> actual weight map + self.actual_by_root: Dict[str, dict] = {} # pool -> by_* -> actual weight map + self.total_by_pool: Dict[str, dict] = {} # pool -> by_* -> total + self.total_by_root: Dict[str, dict] = {} # root -> by_* -> total + self.stats_by_pool: Dict[str, dict] = {} # pool -> by_* -> stddev or avg -> value + self.stats_by_root: Dict[str, dict] = {} # root -> by_* -> stddev or avg -> value + + self.score_by_pool: Dict[str, float] = {} + self.score_by_root: Dict[str, Dict[str, float]] = {} + + self.score = 0.0 + + def show(self, verbose: bool = False) -> str: + if verbose: + r = self.ms.desc + '\n' + r += 'target_by_root %s\n' % self.target_by_root + r += 'actual_by_pool %s\n' % self.actual_by_pool + r += 'actual_by_root %s\n' % self.actual_by_root + r += 'count_by_pool %s\n' % self.count_by_pool + r += 'count_by_root %s\n' % self.count_by_root + r += 'total_by_pool %s\n' % self.total_by_pool + r += 'total_by_root %s\n' % self.total_by_root + r += 'stats_by_root %s\n' % self.stats_by_root + r += 'score_by_pool %s\n' % self.score_by_pool + r += 'score_by_root %s\n' % self.score_by_root + else: + r = self.ms.desc + ' ' + r += 'score %f (lower is better)\n' % self.score + return r + + def calc_stats(self, count, target, total): + num = max(len(target), 1) + r: Dict[str, Dict[str, Union[int, float]]] = {} + for t in ('pgs', 'objects', 'bytes'): + if total[t] == 0: + r[t] = { + 'max': 0, + 'min': 0, + 'avg': 0, + 
'stddev': 0, + 'sum_weight': 0, + 'score': 0, + } + continue + + avg = float(total[t]) / float(num) + dev = 0.0 + + # score is a measure of how uneven the data distribution is. + # score lies between [0, 1), 0 means perfect distribution. + score = 0.0 + sum_weight = 0.0 + + for k, v in count[t].items(): + # adjust/normalize by weight + if target[k]: + adjusted = float(v) / target[k] / float(num) + else: + adjusted = 0.0 + + # Overweighted devices and their weights are factors to calculate reweight_urgency. + # One 10% underfilled device with five 2% overfilled devices is arguably a better + # situation than one 10% overfilled device with five 2% underfilled devices + if adjusted > avg: + ''' + F(x) = 2*phi(x) - 1, where phi(x) = cdf of the standard normal distribution and + x = (adjusted - avg)/avg. + Since we're considering only over-weighted devices, x >= 0, and so phi(x) lies in [0.5, 1). + To bring the range of F(x) into [0, 1), we need the above modification. + + In general, we need a function F(x), where x = (adjusted - avg)/avg, such that: + 1. F(x) is bounded between 0 and 1, so that reweight_urgency is also bounded. + 2. A larger value of x implies more urgency to reweight. + 3. The difference between values of F(x) for large x is minimal. + 4. F(x) approaches 1 (highest urgency to reweight) steeply. + + We could have used F(x) = 1 - e^(-x), but it converges to 1 more slowly than the erf-based form used here. + + cdf of the standard normal distribution: https://stackoverflow.com/a/29273201 + ''' + score += target[k] * (math.erf(((adjusted - avg) / avg) / math.sqrt(2.0))) + sum_weight += target[k] + dev += (avg - adjusted) * (avg - adjusted) + stddev = math.sqrt(dev / float(max(num - 1, 1))) + score = score / max(sum_weight, 1) + r[t] = { + 'max': max(count[t].values()), + 'min': min(count[t].values()), + 'avg': avg, + 'stddev': stddev, + 'sum_weight': sum_weight, + 'score': score, + } + return r + + +class Module(MgrModule): + MODULE_OPTIONS = [ + Option(name='active', + type='bool', + default=True, + desc='automatically balance PGs across cluster', + runtime=True), + Option(name='begin_time', + type='str', + default='0000', + desc='beginning time of day to automatically balance', + long_desc='This is a time of day in the format HHMM.', + runtime=True), + Option(name='end_time', + type='str', + default='2359', + desc='ending time of day to automatically balance', + long_desc='This is a time of day in the format HHMM.', + runtime=True), + Option(name='begin_weekday', + type='uint', + default=0, + min=0, + max=6, + desc='Restrict automatic balancing to this day of the week or later', + long_desc='0 = Sunday, 1 = Monday, etc.', + runtime=True), + Option(name='end_weekday', + type='uint', + default=0, + min=0, + max=6, + desc='Restrict automatic balancing to days of the week earlier than this', + long_desc='0 = Sunday, 1 = Monday, etc.', + runtime=True), + Option(name='crush_compat_max_iterations', + type='uint', + default=25, + min=1, + max=250, + desc='maximum number of iterations to attempt optimization', + runtime=True), + Option(name='crush_compat_metrics', + type='str', + default='pgs,objects,bytes', + desc='metrics with which to calculate OSD utilization', + long_desc='Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization.', + runtime=True), + Option(name='crush_compat_step', + type='float', + default=.5, + min=.001, + max=.999, + desc='aggressiveness of
optimization', + long_desc='.99 is very aggressive, .01 is less aggressive', + runtime=True), + Option(name='min_score', + type='float', + default=0, + desc='minimum score, below which no optimization is attempted', + runtime=True), + Option(name='mode', + desc='Balancer mode', + default='upmap', + enum_allowed=['none', 'crush-compat', 'upmap'], + runtime=True), + Option(name='sleep_interval', + type='secs', + default=60, + desc='how frequently to wake up and attempt optimization', + runtime=True), + Option(name='upmap_max_optimizations', + type='uint', + default=10, + desc='maximum upmap optimizations to make per attempt', + runtime=True), + Option(name='upmap_max_deviation', + type='int', + default=5, + min=1, + desc='deviation below which no optimization is attempted', + long_desc='If the number of PGs are within this count then no optimization is attempted', + runtime=True), + Option(name='pool_ids', + type='str', + default='', + desc='pools which the automatic balancing will be limited to', + runtime=True) + ] + + active = False + run = True + plans: Dict[str, Plan] = {} + mode = '' + optimizing = False + last_optimize_started = '' + last_optimize_duration = '' + optimize_result = '' + no_optimization_needed = False + success_string = 'Optimization plan created successfully' + in_progress_string = 'in progress' + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super(Module, self).__init__(*args, **kwargs) + self.event = Event() + + @CLIReadCommand('balancer status') + def show_status(self) -> Tuple[int, str, str]: + """ + Show balancer status + """ + s = { + 'plans': list(self.plans.keys()), + 'active': self.active, + 'last_optimize_started': self.last_optimize_started, + 'last_optimize_duration': self.last_optimize_duration, + 'optimize_result': self.optimize_result, + 'no_optimization_needed': self.no_optimization_needed, + 'mode': self.get_module_option('mode'), + } + return (0, json.dumps(s, indent=4, sort_keys=True), '') + + @CLICommand('balancer mode') + def set_mode(self, mode: Mode) -> Tuple[int, str, str]: + """ + Set balancer mode + """ + if mode == Mode.upmap: + min_compat_client = self.get_osdmap().dump().get('require_min_compat_client', '') + if min_compat_client < 'luminous': # works well because version is alphabetized.. + warn = ('min_compat_client "%s" ' + '< "luminous", which is required for pg-upmap. ' + 'Try "ceph osd set-require-min-compat-client luminous" ' + 'before enabling this mode' % min_compat_client) + return (-errno.EPERM, '', warn) + elif mode == Mode.crush_compat: + ms = MappingState(self.get_osdmap(), + self.get("pg_stats"), + self.get("pool_stats"), + 'initialize compat weight-set') + self.get_compat_weight_set_weights(ms) # ignore error + self.set_module_option('mode', mode.value) + return (0, '', '') + + @CLICommand('balancer on') + def on(self) -> Tuple[int, str, str]: + """ + Enable automatic balancing + """ + if not self.active: + self.set_module_option('active', 'true') + self.active = True + self.event.set() + return (0, '', '') + + @CLICommand('balancer off') + def off(self) -> Tuple[int, str, str]: + """ + Disable automatic balancing + """ + if self.active: + self.set_module_option('active', 'false') + self.active = False + self.event.set() + return (0, '', '') + + @CLIReadCommand('balancer pool ls') + def pool_ls(self) -> Tuple[int, str, str]: + """ + List automatic balancing pools + + Note that empty list means all existing pools will be automatic balancing targets, + which is the default behaviour of balancer. 
+ """ + pool_ids = cast(str, self.get_module_option('pool_ids')) + if pool_ids == '': + return (0, '', '') + pool_ids = [int(p) for p in pool_ids.split(',')] + pool_name_by_id = dict((p['pool'], p['pool_name']) + for p in self.get_osdmap().dump().get('pools', [])) + should_prune = False + final_ids: List[int] = [] + final_names = [] + for p in pool_ids: + if p in pool_name_by_id: + final_ids.append(p) + final_names.append(pool_name_by_id[p]) + else: + should_prune = True + if should_prune: # some pools were gone, prune + self.set_module_option('pool_ids', ','.join(str(p) for p in final_ids)) + return (0, json.dumps(sorted(final_names), indent=4, sort_keys=True), '') + + @CLICommand('balancer pool add') + def pool_add(self, pools: Sequence[str]) -> Tuple[int, str, str]: + """ + Enable automatic balancing for specific pools + """ + raw_names = pools + pool_id_by_name = dict((p['pool_name'], p['pool']) + for p in self.get_osdmap().dump().get('pools', [])) + invalid_names = [p for p in raw_names if p not in pool_id_by_name] + if invalid_names: + return (-errno.EINVAL, '', 'pool(s) %s not found' % invalid_names) + to_add = set(str(pool_id_by_name[p]) for p in raw_names if p in pool_id_by_name) + pool_ids = cast(str, self.get_module_option('pool_ids')) + existing = set(pool_ids.split(',') if pool_ids else []) + final = to_add | existing + self.set_module_option('pool_ids', ','.join(final)) + return (0, '', '') + + @CLICommand('balancer pool rm') + def pool_rm(self, pools: Sequence[str]) -> Tuple[int, str, str]: + """ + Disable automatic balancing for specific pools + """ + raw_names = pools + existing = cast(str, self.get_module_option('pool_ids')) + if existing == '': # for idempotence + return (0, '', '') + existing = existing.split(',') + osdmap = self.get_osdmap() + pool_ids = [str(p['pool']) for p in osdmap.dump().get('pools', [])] + pool_id_by_name = dict((p['pool_name'], p['pool']) for p in osdmap.dump().get('pools', [])) + final = [p for p in existing if p in pool_ids] + to_delete = [str(pool_id_by_name[p]) for p in raw_names if p in pool_id_by_name] + final = set(final) - set(to_delete) + self.set_module_option('pool_ids', ','.join(final)) + return (0, '', '') + + def _state_from_option(self, option: Optional[str] = None) -> Tuple[MappingState, List[str]]: + pools = [] + if option is None: + ms = MappingState(self.get_osdmap(), + self.get("pg_stats"), + self.get("pool_stats"), + 'current cluster') + elif option in self.plans: + plan = self.plans.get(option) + assert plan + pools = plan.pools + if plan.mode == 'upmap': + # Note that for upmap, to improve the efficiency, + # we use a basic version of Plan without keeping the obvious + # *redundant* MS member. + # Hence ms might not be accurate here since we are basically + # using an old snapshotted osdmap vs a fresh copy of pg_stats. + # It should not be a big deal though.. + ms = MappingState(plan.osdmap, + self.get("pg_stats"), + self.get("pool_stats"), + f'plan "{plan.name}"') + else: + ms = cast(MsPlan, plan).final_state() + else: + # not a plan, does it look like a pool? 
+ osdmap = self.get_osdmap() + valid_pool_names = [p['pool_name'] for p in osdmap.dump().get('pools', [])] + if option not in valid_pool_names: + raise ValueError(f'option "{option}" not a plan or a pool') + pools.append(option) + ms = MappingState(osdmap, + self.get("pg_stats"), + self.get("pool_stats"), + f'pool "{option}"') + return ms, pools + + @CLIReadCommand('balancer eval-verbose') + def plan_eval_verbose(self, option: Optional[str] = None): + """ + Evaluate data distribution for the current cluster or specific pool or specific + plan (verbosely) + """ + try: + ms, pools = self._state_from_option(option) + return (0, self.evaluate(ms, pools, verbose=True), '') + except ValueError as e: + return (-errno.EINVAL, '', str(e)) + + @CLIReadCommand('balancer eval') + def plan_eval_brief(self, option: Optional[str] = None): + """ + Evaluate data distribution for the current cluster or specific pool or specific plan + """ + try: + ms, pools = self._state_from_option(option) + return (0, self.evaluate(ms, pools, verbose=False), '') + except ValueError as e: + return (-errno.EINVAL, '', str(e)) + + @CLIReadCommand('balancer optimize') + def plan_optimize(self, plan: str, pools: List[str] = []) -> Tuple[int, str, str]: + """ + Run optimizer to create a new plan + """ + # The GIL can be released by the active balancer, so disallow when active + if self.active: + return (-errno.EINVAL, '', 'Balancer enabled, disable to optimize manually') + if self.optimizing: + return (-errno.EINVAL, '', 'Balancer is finishing up... try again') + osdmap = self.get_osdmap() + valid_pool_names = [p['pool_name'] for p in osdmap.dump().get('pools', [])] + invalid_pool_names = [] + for p in pools: + if p not in valid_pool_names: + invalid_pool_names.append(p) + if len(invalid_pool_names): + return (-errno.EINVAL, '', 'pools %s not found' % invalid_pool_names) + plan_ = self.plan_create(plan, osdmap, pools) + self.last_optimize_started = time.asctime(time.localtime()) + self.optimize_result = self.in_progress_string + start = time.time() + r, detail = self.optimize(plan_) + end = time.time() + self.last_optimize_duration = str(datetime.timedelta(seconds=(end - start))) + if r == 0: + # Add plan if an optimization was created + self.optimize_result = self.success_string + self.plans[plan] = plan_ + else: + self.optimize_result = detail + return (r, '', detail) + + @CLIReadCommand('balancer show') + def plan_show(self, plan: str) -> Tuple[int, str, str]: + """ + Show details of an optimization plan + """ + plan_ = self.plans.get(plan) + if not plan_: + return (-errno.ENOENT, '', f'plan {plan} not found') + return (0, plan_.show(), '') + + @CLICommand('balancer rm') + def plan_rm(self, plan: str) -> Tuple[int, str, str]: + """ + Discard an optimization plan + """ + if plan in self.plans: + del self.plans[plan] + return (0, '', '') + + @CLICommand('balancer reset') + def plan_reset(self) -> Tuple[int, str, str]: + """ + Discard all optimization plans + """ + self.plans = {} + return (0, '', '') + + @CLIReadCommand('balancer dump') + def plan_dump(self, plan: str) -> Tuple[int, str, str]: + """ + Show an optimization plan + """ + plan_ = self.plans.get(plan) + if not plan_: + return -errno.ENOENT, '', f'plan {plan} not found' + else: + return (0, plan_.dump(), '') + + @CLIReadCommand('balancer ls') + def plan_ls(self) -> Tuple[int, str, str]: + """ + List all plans + """ + return (0, json.dumps([p for p in self.plans], indent=4, sort_keys=True), '') + + @CLIReadCommand('balancer execute') + def plan_execute(self, plan: str)
-> Tuple[int, str, str]: + """ + Execute an optimization plan + """ + # The GIL can be released by the active balancer, so disallow when active + if self.active: + return (-errno.EINVAL, '', 'Balancer enabled, disable to execute a plan') + if self.optimizing: + return (-errno.EINVAL, '', 'Balancer is finishing up... try again') + plan_ = self.plans.get(plan) + if not plan_: + return (-errno.ENOENT, '', f'plan {plan} not found') + r, detail = self.execute(plan_) + self.plan_rm(plan) + return (r, '', detail) + + def shutdown(self) -> None: + self.log.info('Stopping') + self.run = False + self.event.set() + + def time_permit(self) -> bool: + local_time = time.localtime() + time_of_day = time.strftime('%H%M', local_time) + weekday = (local_time.tm_wday + 1) % 7 # be compatible with C + permit = False + + def check_time(time: str, option: str): + if len(time) != 4: + self.log.error('invalid time for %s - expected HHMM format', option) + try: + datetime.time(int(time[:2]), int(time[2:])) + except ValueError as err: + self.log.error('invalid time for %s - %s', option, err) + + begin_time = cast(str, self.get_module_option('begin_time')) + check_time(begin_time, 'begin_time') + end_time = cast(str, self.get_module_option('end_time')) + check_time(end_time, 'end_time') + if begin_time < end_time: + permit = begin_time <= time_of_day < end_time + elif begin_time == end_time: + permit = True + else: + permit = time_of_day >= begin_time or time_of_day < end_time + if not permit: + self.log.debug("should run between %s - %s, now %s, skipping", + begin_time, end_time, time_of_day) + return False + + begin_weekday = cast(int, self.get_module_option('begin_weekday')) + end_weekday = cast(int, self.get_module_option('end_weekday')) + if begin_weekday < end_weekday: + permit = begin_weekday <= weekday <= end_weekday + elif begin_weekday == end_weekday: + permit = True + else: + permit = weekday >= begin_weekday or weekday < end_weekday + if not permit: + self.log.debug("should run between weekday %d - %d, now %d, skipping", + begin_weekday, end_weekday, weekday) + return False + + return True + + def serve(self) -> None: + self.log.info('Starting') + while self.run: + self.active = cast(bool, self.get_module_option('active')) + sleep_interval = cast(float, self.get_module_option('sleep_interval')) + self.log.debug('Waking up [%s, now %s]', + "active" if self.active else "inactive", + time.strftime(TIME_FORMAT, time.localtime())) + if self.active and self.time_permit(): + self.log.debug('Running') + name = 'auto_%s' % time.strftime(TIME_FORMAT, time.gmtime()) + osdmap = self.get_osdmap() + pool_ids = cast(str, self.get_module_option('pool_ids')) + if pool_ids: + allow = [int(p) for p in pool_ids.split(',')] + else: + allow = [] + final: List[str] = [] + if allow: + pools = osdmap.dump().get('pools', []) + valid = [p['pool'] for p in pools] + ids = set(allow) & set(valid) + if set(allow) - set(valid): # some pools were gone, prune + self.set_module_option('pool_ids', ','.join(str(p) for p in ids)) + pool_name_by_id = dict((p['pool'], p['pool_name']) for p in pools) + final = [pool_name_by_id[p] for p in ids if p in pool_name_by_id] + plan = self.plan_create(name, osdmap, final) + self.optimizing = True + self.last_optimize_started = time.asctime(time.localtime()) + self.optimize_result = self.in_progress_string + start = time.time() + r, detail = self.optimize(plan) + end = time.time() + self.last_optimize_duration = str(datetime.timedelta(seconds=(end - start))) + if r == 0: + self.optimize_result =
self.success_string + self.execute(plan) + else: + self.optimize_result = detail + self.optimizing = False + self.log.debug('Sleeping for %d', sleep_interval) + self.event.wait(sleep_interval) + self.event.clear() + + def plan_create(self, name: str, osdmap: OSDMap, pools: List[str]) -> Plan: + mode = cast(str, self.get_module_option('mode')) + if mode == 'upmap': + # drop unnecessary MS member for upmap mode. + # this way we could effectively eliminate the usage of a + # complete pg_stats, which can become horribly inefficient + # as pg_num grows.. + plan = Plan(name, mode, osdmap, pools) + else: + plan = MsPlan(name, + mode, + MappingState(osdmap, + self.get("pg_stats"), + self.get("pool_stats"), + 'plan %s initial' % name), + pools) + return plan + + def calc_eval(self, ms: MappingState, pools: List[str]) -> Eval: + pe = Eval(ms) + pool_rule = {} + pool_info = {} + for p in ms.osdmap_dump.get('pools', []): + if len(pools) and p['pool_name'] not in pools: + continue + # skip dead or not-yet-ready pools too + if p['pool'] not in ms.poolids: + continue + pe.pool_name[p['pool']] = p['pool_name'] + pe.pool_id[p['pool_name']] = p['pool'] + pool_rule[p['pool_name']] = p['crush_rule'] + pe.pool_roots[p['pool_name']] = [] + pool_info[p['pool_name']] = p + if len(pool_info) == 0: + return pe + self.log.debug('pool_name %s' % pe.pool_name) + self.log.debug('pool_id %s' % pe.pool_id) + self.log.debug('pools %s' % pools) + self.log.debug('pool_rule %s' % pool_rule) + + osd_weight = {a['osd']: a['weight'] + for a in ms.osdmap_dump.get('osds', []) if a['weight'] > 0} + + # get expected distributions by root + actual_by_root: Dict[str, Dict[str, dict]] = {} + rootids = ms.crush.find_takes() + roots = [] + for rootid in rootids: + ls = ms.osdmap.get_pools_by_take(rootid) + want = [] + # find out roots associating with pools we are passed in + for candidate in ls: + if candidate in pe.pool_name: + want.append(candidate) + if len(want) == 0: + continue + root = ms.crush.get_item_name(rootid) + pe.root_pools[root] = [] + for poolid in want: + pe.pool_roots[pe.pool_name[poolid]].append(root) + pe.root_pools[root].append(pe.pool_name[poolid]) + pe.root_ids[root] = rootid + roots.append(root) + weight_map = ms.crush.get_take_weight_osd_map(rootid) + adjusted_map = { + osd: cw * osd_weight[osd] + for osd, cw in weight_map.items() if osd in osd_weight and cw > 0 + } + sum_w = sum(adjusted_map.values()) + assert len(adjusted_map) == 0 or sum_w > 0 + pe.target_by_root[root] = {osd: w / sum_w + for osd, w in adjusted_map.items()} + actual_by_root[root] = { + 'pgs': {}, + 'objects': {}, + 'bytes': {}, + } + for osd in pe.target_by_root[root]: + actual_by_root[root]['pgs'][osd] = 0 + actual_by_root[root]['objects'][osd] = 0 + actual_by_root[root]['bytes'][osd] = 0 + pe.total_by_root[root] = { + 'pgs': 0, + 'objects': 0, + 'bytes': 0, + } + self.log.debug('pool_roots %s' % pe.pool_roots) + self.log.debug('root_pools %s' % pe.root_pools) + self.log.debug('target_by_root %s' % pe.target_by_root) + + # pool and root actual + for pool, pi in pool_info.items(): + poolid = pi['pool'] + pm = ms.pg_up_by_poolid[poolid] + pgs = 0 + objects = 0 + bytes = 0 + pgs_by_osd = {} + objects_by_osd = {} + bytes_by_osd = {} + for pgid, up in pm.items(): + for osd in [int(osd) for osd in up]: + if osd == CRUSHMap.ITEM_NONE: + continue + if osd not in pgs_by_osd: + pgs_by_osd[osd] = 0 + objects_by_osd[osd] = 0 + bytes_by_osd[osd] = 0 + pgs_by_osd[osd] += 1 + objects_by_osd[osd] += ms.pg_stat[pgid]['num_objects'] + bytes_by_osd[osd] += 
ms.pg_stat[pgid]['num_bytes'] + # pick a root to associate this pg instance with. + # note that this is imprecise if the roots have + # overlapping children. + # FIXME: divide bytes by k for EC pools. + for root in pe.pool_roots[pool]: + if osd in pe.target_by_root[root]: + actual_by_root[root]['pgs'][osd] += 1 + actual_by_root[root]['objects'][osd] += ms.pg_stat[pgid]['num_objects'] + actual_by_root[root]['bytes'][osd] += ms.pg_stat[pgid]['num_bytes'] + pgs += 1 + objects += ms.pg_stat[pgid]['num_objects'] + bytes += ms.pg_stat[pgid]['num_bytes'] + pe.total_by_root[root]['pgs'] += 1 + pe.total_by_root[root]['objects'] += ms.pg_stat[pgid]['num_objects'] + pe.total_by_root[root]['bytes'] += ms.pg_stat[pgid]['num_bytes'] + break + pe.count_by_pool[pool] = { + 'pgs': { + k: v + for k, v in pgs_by_osd.items() + }, + 'objects': { + k: v + for k, v in objects_by_osd.items() + }, + 'bytes': { + k: v + for k, v in bytes_by_osd.items() + }, + } + pe.actual_by_pool[pool] = { + 'pgs': { + k: float(v) / float(max(pgs, 1)) + for k, v in pgs_by_osd.items() + }, + 'objects': { + k: float(v) / float(max(objects, 1)) + for k, v in objects_by_osd.items() + }, + 'bytes': { + k: float(v) / float(max(bytes, 1)) + for k, v in bytes_by_osd.items() + }, + } + pe.total_by_pool[pool] = { + 'pgs': pgs, + 'objects': objects, + 'bytes': bytes, + } + for root in pe.total_by_root: + pe.count_by_root[root] = { + 'pgs': { + k: float(v) + for k, v in actual_by_root[root]['pgs'].items() + }, + 'objects': { + k: float(v) + for k, v in actual_by_root[root]['objects'].items() + }, + 'bytes': { + k: float(v) + for k, v in actual_by_root[root]['bytes'].items() + }, + } + pe.actual_by_root[root] = { + 'pgs': { + k: float(v) / float(max(pe.total_by_root[root]['pgs'], 1)) + for k, v in actual_by_root[root]['pgs'].items() + }, + 'objects': { + k: float(v) / float(max(pe.total_by_root[root]['objects'], 1)) + for k, v in actual_by_root[root]['objects'].items() + }, + 'bytes': { + k: float(v) / float(max(pe.total_by_root[root]['bytes'], 1)) + for k, v in actual_by_root[root]['bytes'].items() + }, + } + self.log.debug('actual_by_pool %s' % pe.actual_by_pool) + self.log.debug('actual_by_root %s' % pe.actual_by_root) + + # average and stddev and score + pe.stats_by_root = { + a: pe.calc_stats( + b, + pe.target_by_root[a], + pe.total_by_root[a] + ) for a, b in pe.count_by_root.items() + } + self.log.debug('stats_by_root %s' % pe.stats_by_root) + + # the scores are already normalized + pe.score_by_root = { + r: { + 'pgs': pe.stats_by_root[r]['pgs']['score'], + 'objects': pe.stats_by_root[r]['objects']['score'], + 'bytes': pe.stats_by_root[r]['bytes']['score'], + } for r in pe.total_by_root.keys() + } + self.log.debug('score_by_root %s' % pe.score_by_root) + + # get the list of score metrics, comma separated + metrics = cast(str, self.get_module_option('crush_compat_metrics')).split(',') + + # total score is just average of normalized stddevs + pe.score = 0.0 + for r, vs in pe.score_by_root.items(): + for k, v in vs.items(): + if k in metrics: + pe.score += v + pe.score /= len(metrics) * len(roots) + return pe + + def evaluate(self, ms: MappingState, pools: List[str], verbose: bool = False) -> str: + pe = self.calc_eval(ms, pools) + return pe.show(verbose=verbose) + + def optimize(self, plan: Plan) -> Tuple[int, str]: + self.log.info('Optimize plan %s' % plan.name) + max_misplaced = cast(float, self.get_ceph_option('target_max_misplaced_ratio')) + self.log.info('Mode %s, max misplaced %f' % + (plan.mode, max_misplaced)) + + info = 
self.get('pg_status') + unknown = info.get('unknown_pgs_ratio', 0.0) + degraded = info.get('degraded_ratio', 0.0) + inactive = info.get('inactive_pgs_ratio', 0.0) + misplaced = info.get('misplaced_ratio', 0.0) + plan.pg_status = info + self.log.debug('unknown %f degraded %f inactive %f misplaced %g', + unknown, degraded, inactive, misplaced) + if unknown > 0.0: + detail = 'Some PGs (%f) are unknown; try again later' % unknown + self.log.info(detail) + return -errno.EAGAIN, detail + elif degraded > 0.0: + detail = 'Some objects (%f) are degraded; try again later' % degraded + self.log.info(detail) + return -errno.EAGAIN, detail + elif inactive > 0.0: + detail = 'Some PGs (%f) are inactive; try again later' % inactive + self.log.info(detail) + return -errno.EAGAIN, detail + elif misplaced >= max_misplaced: + detail = 'Too many objects (%f > %f) are misplaced; ' \ + 'try again later' % (misplaced, max_misplaced) + self.log.info(detail) + return -errno.EAGAIN, detail + else: + if plan.mode == 'upmap': + return self.do_upmap(plan) + elif plan.mode == 'crush-compat': + return self.do_crush_compat(cast(MsPlan, plan)) + elif plan.mode == 'none': + detail = 'Please do "ceph balancer mode" to choose a valid mode first' + self.log.info('Idle') + return -errno.ENOEXEC, detail + else: + detail = 'Unrecognized mode %s' % plan.mode + self.log.info(detail) + return -errno.EINVAL, detail + + def do_upmap(self, plan: Plan) -> Tuple[int, str]: + self.log.info('do_upmap') + max_optimizations = cast(float, self.get_module_option('upmap_max_optimizations')) + max_deviation = cast(int, self.get_module_option('upmap_max_deviation')) + osdmap_dump = plan.osdmap_dump + + if len(plan.pools): + pools = plan.pools + else: # all + pools = [str(i['pool_name']) for i in osdmap_dump.get('pools', [])] + if len(pools) == 0: + detail = 'No pools available' + self.log.info(detail) + return -errno.ENOENT, detail + # shuffle pool list so they all get equal (in)attention + random.shuffle(pools) + self.log.info('pools %s' % pools) + + adjusted_pools = [] + inc = plan.inc + total_did = 0 + left = max_optimizations + pools_with_pg_merge = [p['pool_name'] for p in osdmap_dump.get('pools', []) + if p['pg_num'] > p['pg_num_target']] + crush_rule_by_pool_name = dict((p['pool_name'], p['crush_rule']) + for p in osdmap_dump.get('pools', [])) + for pool in pools: + if pool not in crush_rule_by_pool_name: + self.log.info('pool %s does not exist' % pool) + continue + if pool in pools_with_pg_merge: + self.log.info('pool %s has pending PG(s) for merging, skipping for now' % pool) + continue + adjusted_pools.append(pool) + # shuffle so all pools get equal (in)attention + random.shuffle(adjusted_pools) + pool_dump = osdmap_dump.get('pools', []) + for pool in adjusted_pools: + for p in pool_dump: + if p['pool_name'] == pool: + pool_id = p['pool'] + break + + # note that here we deliberately exclude any scrubbing pgs too + # since scrubbing activities have significant impacts on performance + num_pg_active_clean = 0 + for p in plan.pg_status.get('pgs_by_pool_state', []): + pgs_pool_id = p['pool_id'] + if pgs_pool_id != pool_id: + continue + for s in p['pg_state_counts']: + if s['state_name'] == 'active+clean': + num_pg_active_clean += s['count'] + break + available = min(left, num_pg_active_clean) + did = plan.osdmap.calc_pg_upmaps(inc, max_deviation, available, [pool]) + total_did += did + left -= did + if left <= 0: + break + self.log.info('prepared %d/%d changes' % (total_did, max_optimizations)) + if total_did == 0: + 
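+            # no upmap changes could be prepared for any pool this round;
+            # record that fact before returning EALREADY below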
self.no_optimization_needed = True + return -errno.EALREADY, 'Unable to find further optimization, ' \ + 'or pool(s) pg_num is decreasing, ' \ + 'or distribution is already perfect' + return 0, '' + + def do_crush_compat(self, plan: MsPlan) -> Tuple[int, str]: + self.log.info('do_crush_compat') + max_iterations = cast(int, self.get_module_option('crush_compat_max_iterations')) + if max_iterations < 1: + return -errno.EINVAL, '"crush_compat_max_iterations" must be >= 1' + step = cast(float, self.get_module_option('crush_compat_step')) + if step <= 0 or step >= 1.0: + return -errno.EINVAL, '"crush_compat_step" must be in (0, 1)' + max_misplaced = cast(float, self.get_ceph_option('target_max_misplaced_ratio')) + min_pg_per_osd = 2 + + ms = plan.initial + osdmap = ms.osdmap + crush = osdmap.get_crush() + pe = self.calc_eval(ms, plan.pools) + min_score_to_optimize = cast(float, self.get_module_option('min_score')) + if pe.score <= min_score_to_optimize: + if pe.score == 0: + detail = 'Distribution is already perfect' + else: + detail = 'score %f <= min_score %f, will not optimize' \ + % (pe.score, min_score_to_optimize) + self.log.info(detail) + return -errno.EALREADY, detail + + # get current osd reweights + orig_osd_weight = {a['osd']: a['weight'] + for a in ms.osdmap_dump.get('osds', [])} + + # get current compat weight-set weights + orig_ws = self.get_compat_weight_set_weights(ms) + if not orig_ws: + return -errno.EAGAIN, 'compat weight-set not available' + orig_ws = {a: b for a, b in orig_ws.items() if a >= 0} + + # Make sure roots don't overlap their devices. If so, we + # can't proceed. + roots = list(pe.target_by_root.keys()) + self.log.debug('roots %s', roots) + visited: Dict[int, str] = {} + overlap: Dict[int, List[str]] = {} + for root, wm in pe.target_by_root.items(): + for osd in wm: + if osd in visited: + if osd not in overlap: + overlap[osd] = [visited[osd]] + overlap[osd].append(root) + visited[osd] = root + if len(overlap) > 0: + detail = 'Some osds belong to multiple subtrees: %s' % \ + overlap + self.log.error(detail) + return -errno.EOPNOTSUPP, detail + + # rebalance by pgs, objects, or bytes + metrics = cast(str, self.get_module_option('crush_compat_metrics')).split(',') + key = metrics[0] # balancing using the first score metric + if key not in ['pgs', 'bytes', 'objects']: + self.log.warning("Invalid crush_compat balancing key %s. Using 'pgs'." 
% key) + key = 'pgs' + + # go + best_ws = copy.deepcopy(orig_ws) + best_ow = copy.deepcopy(orig_osd_weight) + best_pe = pe + left = max_iterations + bad_steps = 0 + next_ws = copy.deepcopy(best_ws) + next_ow = copy.deepcopy(best_ow) + while left > 0: + # adjust + self.log.debug('best_ws %s' % best_ws) + random.shuffle(roots) + for root in roots: + pools = best_pe.root_pools[root] + osds = len(best_pe.target_by_root[root]) + min_pgs = osds * min_pg_per_osd + if best_pe.total_by_root[root][key] < min_pgs: + self.log.info('Skipping root %s (pools %s), total pgs %d ' + '< minimum %d (%d per osd)', + root, pools, + best_pe.total_by_root[root][key], + min_pgs, min_pg_per_osd) + continue + self.log.info('Balancing root %s (pools %s) by %s' % + (root, pools, key)) + target = best_pe.target_by_root[root] + actual = best_pe.actual_by_root[root][key] + queue = sorted(actual.keys(), + key=lambda osd: -abs(target[osd] - actual[osd])) + for osd in queue: + if orig_osd_weight[osd] == 0: + self.log.debug('skipping out osd.%d', osd) + else: + deviation = target[osd] - actual[osd] + if deviation == 0: + break + self.log.debug('osd.%d deviation %f', osd, deviation) + weight = best_ws[osd] + ow = orig_osd_weight[osd] + if actual[osd] > 0: + calc_weight = target[osd] / actual[osd] * weight * ow + else: + # for newly created osds, reset calc_weight at target value + # this way weight-set will end up absorbing *step* of its + # target (final) value at the very beginning and slowly catch up later. + # note that if this turns out causing too many misplaced + # pgs, then we'll reduce step and retry + calc_weight = target[osd] + new_weight = weight * (1.0 - step) + calc_weight * step + self.log.debug('Reweight osd.%d %f -> %f', osd, weight, + new_weight) + next_ws[osd] = new_weight + if ow < 1.0: + new_ow = min(1.0, max(step + (1.0 - step) * ow, + ow + .005)) + self.log.debug('Reweight osd.%d reweight %f -> %f', + osd, ow, new_ow) + next_ow[osd] = new_ow + + # normalize weights under this root + root_weight = crush.get_item_weight(pe.root_ids[root]) + root_sum = sum(b for a, b in next_ws.items() + if a in target.keys()) + if root_sum > 0 and root_weight > 0: + factor = root_sum / root_weight + self.log.debug('normalizing root %s %d, weight %f, ' + 'ws sum %f, factor %f', + root, pe.root_ids[root], root_weight, + root_sum, factor) + for osd in actual.keys(): + next_ws[osd] = next_ws[osd] / factor + + # recalc + plan.compat_ws = copy.deepcopy(next_ws) + next_ms = plan.final_state() + next_pe = self.calc_eval(next_ms, plan.pools) + next_misplaced = next_ms.calc_misplaced_from(ms) + self.log.debug('Step result score %f -> %f, misplacing %f', + best_pe.score, next_pe.score, next_misplaced) + + if next_misplaced > max_misplaced: + if best_pe.score < pe.score: + self.log.debug('Step misplaced %f > max %f, stopping', + next_misplaced, max_misplaced) + break + step /= 2.0 + next_ws = copy.deepcopy(best_ws) + next_ow = copy.deepcopy(best_ow) + self.log.debug('Step misplaced %f > max %f, reducing step to %f', + next_misplaced, max_misplaced, step) + else: + if next_pe.score > best_pe.score * 1.0001: + bad_steps += 1 + if bad_steps < 5 and random.randint(0, 100) < 70: + self.log.debug('Score got worse, taking another step') + else: + step /= 2.0 + next_ws = copy.deepcopy(best_ws) + next_ow = copy.deepcopy(best_ow) + self.log.debug('Score got worse, trying smaller step %f', + step) + else: + bad_steps = 0 + best_pe = next_pe + best_ws = copy.deepcopy(next_ws) + best_ow = copy.deepcopy(next_ow) + if best_pe.score == 0: + break + 
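+            # one optimization step consumed; the loop stops once
+            # crush_compat_max_iterations is exhausted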
left -= 1 + + # allow a small regression if we are phasing out osd weights + fudge = 0.0 + if best_ow != orig_osd_weight: + fudge = .001 + + if best_pe.score < pe.score + fudge: + self.log.info('Success, score %f -> %f', pe.score, best_pe.score) + plan.compat_ws = best_ws + for osd, w in best_ow.items(): + if w != orig_osd_weight[osd]: + self.log.debug('osd.%d reweight %f', osd, w) + plan.osd_weights[osd] = w + return 0, '' + else: + self.log.info('Failed to find further optimization, score %f', + pe.score) + plan.compat_ws = {} + return -errno.EDOM, 'Unable to find further optimization, ' \ + 'change balancer mode and retry might help' + + def get_compat_weight_set_weights(self, ms: MappingState): + have_choose_args = CRUSHMap.have_default_choose_args(ms.crush_dump) + if have_choose_args: + # get number of buckets in choose_args + choose_args_len = len(CRUSHMap.get_default_choose_args(ms.crush_dump)) + if not have_choose_args or choose_args_len != len(ms.crush_dump['buckets']): + # enable compat weight-set first + self.log.debug('no choose_args or all buckets do not have weight-sets') + self.log.debug('ceph osd crush weight-set create-compat') + result = CommandResult('') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd crush weight-set create-compat', + 'format': 'json', + }), '') + r, outb, outs = result.wait() + if r != 0: + self.log.error('Error creating compat weight-set') + return + + result = CommandResult('') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd crush dump', + 'format': 'json', + }), '') + r, outb, outs = result.wait() + if r != 0: + self.log.error('Error dumping crush map') + return + try: + crushmap = json.loads(outb) + except json.JSONDecodeError: + raise RuntimeError('unable to parse crush map') + else: + crushmap = ms.crush_dump + + raw = CRUSHMap.get_default_choose_args(crushmap) + weight_set = {} + for b in raw: + bucket = None + for t in crushmap['buckets']: + if t['id'] == b['bucket_id']: + bucket = t + break + if not bucket: + raise RuntimeError('could not find bucket %s' % b['bucket_id']) + self.log.debug('bucket items %s' % bucket['items']) + self.log.debug('weight set %s' % b['weight_set'][0]) + if len(bucket['items']) != len(b['weight_set'][0]): + raise RuntimeError('weight-set size does not match bucket items') + for pos in range(len(bucket['items'])): + weight_set[bucket['items'][pos]['id']] = b['weight_set'][0][pos] + + self.log.debug('weight_set weights %s' % weight_set) + return weight_set + + def do_crush(self) -> None: + self.log.info('do_crush (not yet implemented)') + + def do_osd_weight(self) -> None: + self.log.info('do_osd_weight (not yet implemented)') + + def execute(self, plan: Plan) -> Tuple[int, str]: + self.log.info('Executing plan %s' % plan.name) + + commands = [] + + # compat weight-set + if len(plan.compat_ws): + ms_plan = cast(MsPlan, plan) + if not CRUSHMap.have_default_choose_args(ms_plan.initial.crush_dump): + self.log.debug('ceph osd crush weight-set create-compat') + result = CommandResult('') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd crush weight-set create-compat', + 'format': 'json', + }), '') + r, outb, outs = result.wait() + if r != 0: + self.log.error('Error creating compat weight-set') + return r, outs + + for osd, weight in plan.compat_ws.items(): + self.log.info('ceph osd crush weight-set reweight-compat osd.%d %f', + osd, weight) + result = CommandResult('') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd crush weight-set 
reweight-compat', + 'format': 'json', + 'item': 'osd.%d' % osd, + 'weight': [weight], + }), '') + commands.append(result) + + # new_weight + reweightn = {} + for osd, weight in plan.osd_weights.items(): + reweightn[str(osd)] = str(int(weight * float(0x10000))) + if len(reweightn): + self.log.info('ceph osd reweightn %s', reweightn) + result = CommandResult('') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd reweightn', + 'format': 'json', + 'weights': json.dumps(reweightn), + }), '') + commands.append(result) + + # upmap + incdump = plan.inc.dump() + for item in incdump.get('new_pg_upmap', []): + self.log.info('ceph osd pg-upmap %s mappings %s', item['pgid'], + item['osds']) + result = CommandResult('foo') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd pg-upmap', + 'format': 'json', + 'pgid': item['pgid'], + 'id': item['osds'], + }), 'foo') + commands.append(result) + + for pgid in incdump.get('old_pg_upmap', []): + self.log.info('ceph osd rm-pg-upmap %s', pgid) + result = CommandResult('foo') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd rm-pg-upmap', + 'format': 'json', + 'pgid': pgid, + }), 'foo') + commands.append(result) + + for item in incdump.get('new_pg_upmap_items', []): + self.log.info('ceph osd pg-upmap-items %s mappings %s', item['pgid'], + item['mappings']) + osdlist = [] + for m in item['mappings']: + osdlist += [m['from'], m['to']] + result = CommandResult('foo') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd pg-upmap-items', + 'format': 'json', + 'pgid': item['pgid'], + 'id': osdlist, + }), 'foo') + commands.append(result) + + for pgid in incdump.get('old_pg_upmap_items', []): + self.log.info('ceph osd rm-pg-upmap-items %s', pgid) + result = CommandResult('foo') + self.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd rm-pg-upmap-items', + 'format': 'json', + 'pgid': pgid, + }), 'foo') + commands.append(result) + + # wait for commands + self.log.debug('commands %s' % commands) + for result in commands: + r, outb, outs = result.wait() + if r != 0: + self.log.error('execute error: r = %d, detail = %s' % (r, outs)) + return r, outs + self.log.debug('done') + return 0, '' + + def gather_telemetry(self) -> Dict[str, Any]: + return { + 'active': self.active, + 'mode': self.mode, + } diff --git a/src/pybind/mgr/ceph_module.pyi b/src/pybind/mgr/ceph_module.pyi new file mode 100644 index 000000000..50147f08f --- /dev/null +++ b/src/pybind/mgr/ceph_module.pyi @@ -0,0 +1,118 @@ +# This is an interface definition of classes that are generated within C++. +# Used by mypy to do proper type checking of mgr modules. +# Without this file, all classes have undefined base classes. + +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union +try: + from typing import Protocol # Protocol was added in Python 3.8 +except ImportError: + class Protocol: # type: ignore + pass + + +class BasePyOSDMap(object): + def _get_epoch(self): ... + def _get_crush_version(self): ... + def _dump(self):... + def _new_incremental(self):... + def _apply_incremental(self, inc: 'BasePyOSDMapIncremental'):... + def _get_crush(self):... + def _get_pools_by_take(self, take):... + def _calc_pg_upmaps(self, inc, max_deviation, max_iterations, pool):... + def _map_pool_pgs_up(self, poolid):... + def _pg_to_up_acting_osds(self, pool_id, ps):... + def _pool_raw_used_rate(self, pool_id):... + @classmethod + def _build_simple(cls, epoch: int, uuid: Optional[str], num_osd: int) -> 'BasePyOSDMap' :... 
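+
+# Usage sketch (an illustration, not part of this stub file): mgr modules
+# normally reach these classes through the higher-level wrappers in
+# mgr_module rather than calling the underscore methods directly, e.g.:
+#
+#   osdmap = self.get_osdmap()        # an OSDMap wrapping BasePyOSDMap
+#   inc = osdmap.new_incremental()    # wraps BasePyOSDMapIncremental
+#   osdmap.calc_pg_upmaps(inc, max_deviation, max_entries, pools)
+#
+# (``get_osdmap`` and ``calc_pg_upmaps`` appear in the balancer module above;
+# ``new_incremental`` is assumed from the ``_new_incremental`` stub.)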
+ +class BasePyOSDMapIncremental(object): + def _get_epoch(self):... + def _dump(self):... + def _set_osd_reweights(self, weightmap):... + def _set_crush_compat_weight_set_weights(self, weightmap):... + +class BasePyCRUSH(object): + def _dump(self):... + def _get_item_weight(self, item):... + def _get_item_name(self, item):... + def _find_roots(self):... + def _find_takes(self):... + def _get_take_weight_osd_map(self, root):... + +class BaseMgrStandbyModule(object): + def __init__(self, capsule): pass + def _ceph_get(self, data_name: str) -> Dict[str, Any]: ... + def _ceph_get_mgr_id(self):... + def _ceph_get_module_option(self, key, prefix=None):... + def _ceph_get_option(self, key):... + def _ceph_get_store(self, key):... + def _ceph_get_active_uri(self):... + + +OptionValue = Optional[Union[bool, int, float, str]] + + +class CompletionT(Protocol): + def complete(self, r: int, outb: str, outs: str) -> None: ... + + +ServerInfoT = Dict[str, Union[str, List[Dict[str, str]]]] +HealthCheckT = Mapping[str, Union[int, str, Sequence[str]]] +PerfCounterT = Dict[str, Any] + +class BaseMgrModule(object): + def __init__(self, py_modules_ptr: object, this_ptr: object) -> None: pass + def _ceph_get_version(self) -> str: ... + def _ceph_get_release_name(self) -> str: ... + def _ceph_lookup_release_name(self, release: int) -> str: ... + def _ceph_cluster_log(self, channel: str, priority: int, message: str) -> None: ... + def _ceph_get_context(self) -> object: ... + def _ceph_get(self, data_name: str) -> Any: ... + def _ceph_get_server(self, hostname: Optional[str]) -> Union[ServerInfoT, + List[ServerInfoT]]: ... + def _ceph_get_perf_schema(self, svc_type: str, svc_name: str) -> Dict[str, Any]: ... + def _ceph_get_rocksdb_version(self) -> str: ... + def _ceph_get_counter(self, svc_type: str, svc_name: str, path: str) -> Dict[str, List[Tuple[float, int]]]: ... + def _ceph_get_latest_counter(self, svc_type, svc_name, path): ... + def _ceph_get_metadata(self, svc_type, svc_id): ... + def _ceph_get_daemon_status(self, svc_type, svc_id): ... + def _ceph_send_command(self, + result: CompletionT, + svc_type: str, + svc_id: str, + command: str, + tag: str, + inbuf: Optional[str]) -> None: ... + def _ceph_set_health_checks(self, checks: Mapping[str, HealthCheckT]) -> None: ... + def _ceph_get_mgr_id(self) -> str: ... + def _ceph_get_ceph_conf_path(self) -> str: ... + def _ceph_get_option(self, key: str) -> OptionValue: ... + def _ceph_get_foreign_option(self, entity: str, key: str) -> OptionValue: ... + def _ceph_get_module_option(self, + key: str, + default: str, + localized_prefix: str = "") -> OptionValue: ... + def _ceph_get_store_prefix(self, key_prefix) -> Dict[str, str]: ... + def _ceph_set_module_option(self, module: str, key: str, val: Optional[str]) -> None: ... + def _ceph_set_store(self, key: str, val: Optional[str]) -> None: ... + def _ceph_get_store(self, key: str) -> Optional[str]: ... + # mgr actually imports OSDMap from mgr_module and constructs an OSDMap + def _ceph_get_osdmap(self) -> BasePyOSDMap: ... + def _ceph_set_uri(self, uri: str) -> None: ... + def _ceph_set_device_wear_level(self, devid: str, val: float) -> None: ... + def _ceph_have_mon_connection(self) -> bool: ... + def _ceph_update_progress_event(self, evid: str, desc: str, progress: float, add_to_ceph_s: bool) -> None: ... + def _ceph_complete_progress_event(self, evid: str) -> None: ... + def _ceph_clear_all_progress_events(self) -> None: ... 
+    def _ceph_dispatch_remote(self, module_name: str, method_name: str, *args: Any, **kwargs: Any) -> Any: ...
+    def _ceph_add_osd_perf_query(self, query: Dict[str, Dict[str, Any]]) -> Optional[int]: ...
+    def _ceph_remove_osd_perf_query(self, query_id: int) -> None: ...
+    def _ceph_get_osd_perf_counters(self, query_id: int) -> Optional[Dict[str, List[PerfCounterT]]]: ...
+    def _ceph_add_mds_perf_query(self, query: Dict[str, Dict[str, Any]]) -> Optional[int]: ...
+    def _ceph_remove_mds_perf_query(self, query_id: int) -> None: ...
+    def _ceph_reregister_mds_perf_queries(self) -> None: ...
+    def _ceph_get_mds_perf_counters(self, query_id: int) -> Optional[Dict[str, List[PerfCounterT]]]: ...
+    def _ceph_unregister_client(self, name: Optional[str], addrs: str) -> None: ...
+    def _ceph_register_client(self, name: Optional[str], addrs: str, replace: Optional[bool]) -> None: ...
+    def _ceph_is_authorized(self, arguments: Dict[str, str]) -> bool: ...
+    def _ceph_get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]: ...
diff --git a/src/pybind/mgr/cephadm/.gitignore b/src/pybind/mgr/cephadm/.gitignore
new file mode 100644
index 000000000..a273f8603
--- /dev/null
+++ b/src/pybind/mgr/cephadm/.gitignore
@@ -0,0 +1,2 @@
+.vagrant
+ssh-config
diff --git a/src/pybind/mgr/cephadm/HACKING.rst b/src/pybind/mgr/cephadm/HACKING.rst
new file mode 100644
index 000000000..fa6ea9e1b
--- /dev/null
+++ b/src/pybind/mgr/cephadm/HACKING.rst
@@ -0,0 +1,272 @@
+Development
+===========
+
+
+There are multiple ways to set up a development environment for the SSH orchestrator.
+In the following I'll use the `vstart` method.
+
+1) Make sure remoto is installed (0.35 or newer)
+
+2) Use vstart to spin up a cluster
+
+
+::
+
+   # ../src/vstart.sh -n --cephadm
+
+*Note that when you specify `--cephadm` you have to have passwordless ssh access to localhost*
+
+It will add your ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub to `mgr/ssh/ssh_identity_{key, pub}`
+and add your $HOSTNAME to the list of known hosts.
+
+This will also enable the cephadm mgr module and enable it as the orchestrator backend.
+
+*Optional:*
+
+While the above is sufficient for most operations, you may want to add a second host to the mix.
+There is a `Vagrantfile` for creating a minimal cluster in `src/pybind/mgr/cephadm/`.
+
+If you wish to extend the one-node-localhost cluster to, e.g., test more sophisticated OSD
+deployments, you can follow the next steps. All of the following is done from within the
+`src/pybind/mgr/cephadm` directory.
+
+
+1) Spawn VMs
+
+::
+
+   # vagrant up
+
+This will spawn three machines by default:
+mon0, mgr0 and osd0 with 2 additional disks.
+
+You can change that by passing the `MONS` (default: 1), `MGRS` (default: 1), `OSDS` (default: 1) and
+`DISKS` (default: 2) environment variables to overwrite the defaults. To avoid having to set the
+environment variables every time, you can instead create a JSON config file; see
+`./vagrant.config.example.json` for details.
+
+The VMs will also come with the necessary packages preinstalled, as well as your ~/.ssh/id_rsa.pub
+key injected (to users root and vagrant; the cephadm-orchestrator currently connects as root).
+
+
+2) Update the ssh-config
+
+The cephadm orchestrator needs to understand how to connect to the new node. Most likely the VM
+isn't reachable with the default settings used:
+
+::
+
+   Host *
+   User root
+   StrictHostKeyChecking no
+
+You want to adjust this by retrieving an adapted ssh_config from Vagrant.
+
+::
+
+   # vagrant ssh-config > ssh-config
+
+
+Now set the newly created config for Ceph.
+
+::
+
+   # ceph cephadm set-ssh-config -i
+
+
+3) Add the new host
+
+Add the newly created host(s) to the inventory.
+
+::
+
+   # ceph orch host add
+
+
+4) Verify the inventory
+
+You should see the hostname in the list.
+
+::
+
+   # ceph orch host ls
+
+
+5) Verify the devices
+
+To verify that all disks are present and in good shape, check whether all devices
+have been spawned and can be found:
+
+::
+
+   # ceph orch device ls
+
+
+6) Make a snapshot of all your VMs!
+
+To avoid going the long way again next time, snapshot your VMs so that you can
+revert them once they get dirty.
+
+In `this repository <https://github.com/Devp00l/vagrant-helper-scripts>`_ you can find two
+scripts that will help you snapshot and revert the VMs, without having to snapshot
+and revert each VM individually.
+
+
+Understanding ``AsyncCompletion``
+=================================
+
+How can I store temporary variables?
+------------------------------------
+
+Let's imagine you want to write code similar to
+
+.. code:: python
+
+    hosts = self.get_hosts()
+    inventory = self.get_inventory(hosts)
+    return self._create_osd(hosts, drive_group, inventory)
+
+That won't work, as ``get_hosts`` and ``get_inventory`` return objects
+of type ``AsyncCompletion``.
+
+Now let's imagine a Python 3 world, where we can use ``async`` and
+``await``. Then we could actually write this like so:
+
+.. code:: python
+
+    hosts = await self.get_hosts()
+    inventory = await self.get_inventory(hosts)
+    return self._create_osd(hosts, drive_group, inventory)
+
+Let's use a simple example to make this clear:
+
+.. code:: python
+
+    val = await func_1()
+    return func_2(val)
+
+As we're not yet in Python 3, we need to write ``await`` manually by
+calling ``orchestrator.Completion.then()``:
+
+.. code:: python
+
+    func_1().then(lambda val: func_2(val))
+
+    # or
+    func_1().then(func_2)
+
+Now let's desugar the original example:
+
+.. code:: python
+
+    hosts = await self.get_hosts()
+    inventory = await self.get_inventory(hosts)
+    return self._create_osd(hosts, drive_group, inventory)
+
+Now let's replace one ``async`` at a time:
+
+.. code:: python
+
+    hosts = await self.get_hosts()
+    return self.get_inventory(hosts).then(lambda inventory:
+        self._create_osd(hosts, drive_group, inventory))
+
+Then finally:
+
+.. code:: python
+
+    self.get_hosts().then(lambda hosts:
+        self.get_inventory(hosts).then(lambda inventory:
+            self._create_osd(hosts,
+                             drive_group, inventory)))
+
+This also works without lambdas:
+
+.. code:: python
+
+    def call_inventory(hosts):
+        def call_create(inventory):
+            return self._create_osd(hosts, drive_group, inventory)
+
+        return self.get_inventory(hosts).then(call_create)
+
+    self.get_hosts().then(call_inventory)
+
+We should add support for ``await`` as soon as we're on Python 3.
+
+I want to call my function for every host!
+------------------------------------------
+
+Imagine you have a function that looks like so:
+
+.. code:: python
+
+    @async_completion
+    def deploy_stuff(name, node):
+        ...
+
+And you want to call ``deploy_stuff`` like so:
+
+.. code:: python
+
+    return [deploy_stuff(name, node) for node in nodes]
+
+This won't work as expected: it creates one completion per node, while the
+number of ``AsyncCompletion`` objects created should be ``O(1)``. There is a
+solution, though: ``@async_map_completion``
+
+.. code:: python
+
+    @async_map_completion
+    def deploy_stuff(name, node):
+        ...
+
+    return deploy_stuff([(name, node) for node in nodes])
+
+This way, we're only creating one ``AsyncCompletion`` object.
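+
+As a quick sketch (reusing the hypothetical ``deploy_stuff`` and ``nodes``
+names from above, and assuming the aggregated completion chains like any
+other completion), follow-up work can then hang off that single object:
+
+.. code:: python
+
+    @async_map_completion
+    def deploy_stuff(name, node):
+        ...
+
+    # one AsyncCompletion covering every (name, node) tuple; chain the
+    # follow-up step with .then(), as described above
+    return deploy_stuff([(name, node) for node in nodes]).then(
+        lambda results: len(results))  # e.g. count the per-node results
+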
+Note that you should not create new ``AsyncCompletion`` objects within
+``deploy_stuff``, as we would then no longer have ``O(1)`` completions:
+
+.. code:: python
+
+    @async_completion
+    def other_async_function():
+        ...
+
+    @async_map_completion
+    def deploy_stuff(name, node):
+        return other_async_function() # wrong!
+
+Why do we need this?
+--------------------
+
+I've tried to make Completions composable by allowing one completion to be
+called from another completion, i.e. making them re-usable using Promises,
+e.g.:
+
+.. code:: python
+
+    >>> return self.get_hosts().then(self._create_osd)
+
+where ``get_hosts`` returns a Completion of a list of hosts and
+``_create_osd`` takes a list of hosts.
+
+The concept behind this is to store the computation steps explicitly and
+then evaluate the chain:
+
+.. code:: python
+
+    p = Completion(on_complete=lambda x: x*2).then(on_complete=lambda x: str(x))
+    p.finalize(2)
+    assert p.result == "4"
+
+or graphically:
+
+::
+
+    +---------------+      +-----------------+
+    |               | then |                 |
+    | lambda x: x*2 | +--> | lambda x: str(x)|
+    |               |      |                 |
+    +---------------+      +-----------------+
diff --git a/src/pybind/mgr/cephadm/Vagrantfile b/src/pybind/mgr/cephadm/Vagrantfile
new file mode 100644
index 000000000..638258c3a
--- /dev/null
+++ b/src/pybind/mgr/cephadm/Vagrantfile
@@ -0,0 +1,66 @@
+# vi: set ft=ruby :
+#
+# In order to reduce the need of recreating all vagrant boxes every time they
+# get dirty, snapshot them and revert the snapshot of them instead.
+# Two helpful scripts to do this easily can be found here:
+# https://github.com/Devp00l/vagrant-helper-scripts
+
+require 'json'
+configFileName = 'vagrant.config.json'
+CONFIG = File.file?(configFileName) && JSON.parse(File.read(File.join(File.dirname(__FILE__), configFileName)))
+
+def getConfig(name, default)
+  down = name.downcase
+  up = name.upcase
+  CONFIG && CONFIG[down] ? CONFIG[down] : (ENV[up] ? ENV[up].to_i : default)
+end
+
+OSDS = getConfig('OSDS', 1)
+MGRS = getConfig('MGRS', 1)
+MONS = getConfig('MONS', 1)
+DISKS = getConfig('DISKS', 2)
+
+# Activate only for test purposes, as it changes the output of each vagrant command, like the one used to get the ssh_config.
+# puts "Your setup:","OSDs: #{OSDS}","MGRs: #{MGRS}","MONs: #{MONS}","Disks per OSD: #{DISKS}" + +Vagrant.configure("2") do |config| + config.vm.synced_folder ".", "/vagrant", disabled: true + config.vm.network "private_network", type: "dhcp" + config.vm.box = "centos/stream8" + + (0..MONS - 1).each do |i| + config.vm.define "mon#{i}" do |mon| + mon.vm.hostname = "mon#{i}" + end + end + (0..MGRS - 1).each do |i| + config.vm.define "mgr#{i}" do |mgr| + mgr.vm.hostname = "mgr#{i}" + end + end + (0..OSDS - 1).each do |i| + config.vm.define "osd#{i}" do |osd| + osd.vm.hostname = "osd#{i}" + osd.vm.provider :libvirt do |libvirt| + (0..DISKS - 1).each do |d| + # In ruby value.chr makes ASCII char from value + libvirt.storage :file, :size => '20G', :device => "vd#{(98+d).chr}#{i}" + end + end + end + end + + config.vm.provision "file", source: "~/.ssh/id_rsa.pub", destination: "~/.ssh/id_rsa.pub" + config.vm.provision "shell", inline: <<-SHELL + cat /home/vagrant/.ssh/id_rsa.pub >> /home/vagrant/.ssh/authorized_keys + sudo cp -r /home/vagrant/.ssh /root/.ssh + SHELL + + config.vm.provision "shell", inline: <<-SHELL + sudo yum install -y yum-utils + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + sudo rpm --import 'https://download.ceph.com/keys/release.asc' + curl -L https://shaman.ceph.com/api/repos/ceph/main/latest/centos/8/repo/ | sudo tee /etc/yum.repos.d/shaman.repo + sudo yum install -y python36 podman cephadm libseccomp-devel + SHELL +end diff --git a/src/pybind/mgr/cephadm/__init__.py b/src/pybind/mgr/cephadm/__init__.py new file mode 100644 index 000000000..597d883f7 --- /dev/null +++ b/src/pybind/mgr/cephadm/__init__.py @@ -0,0 +1,10 @@ +from .module import CephadmOrchestrator + +__all__ = [ + "CephadmOrchestrator", +] + +import os +if 'UNITTEST' in os.environ: + import tests + __all__.append(tests.__name__) diff --git a/src/pybind/mgr/cephadm/agent.py b/src/pybind/mgr/cephadm/agent.py new file mode 100644 index 000000000..93a08cb34 --- /dev/null +++ b/src/pybind/mgr/cephadm/agent.py @@ -0,0 +1,471 @@ +try: + import cherrypy + from cherrypy._cpserver import Server +except ImportError: + # to avoid sphinx build crash + class Server: # type: ignore + pass + +import json +import logging +import socket +import ssl +import tempfile +import threading +import time + +from orchestrator import DaemonDescriptionStatus +from orchestrator._interface import daemon_type_to_service +from ceph.utils import datetime_now +from ceph.deployment.inventory import Devices +from ceph.deployment.service_spec import ServiceSpec, PlacementSpec +from cephadm.services.cephadmservice import CephadmDaemonDeploySpec +from cephadm.ssl_cert_utils import SSLCerts +from mgr_util import test_port_allocation, PortAlreadyInUse + +from typing import Any, Dict, List, Set, TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + + +def cherrypy_filter(record: logging.LogRecord) -> int: + blocked = [ + 'TLSV1_ALERT_DECRYPT_ERROR' + ] + msg = record.getMessage() + return not any([m for m in blocked if m in msg]) + + +logging.getLogger('cherrypy.error').addFilter(cherrypy_filter) +cherrypy.log.access_log.propagate = False + + +class AgentEndpoint: + + KV_STORE_AGENT_ROOT_CERT = 'cephadm_agent/root/cert' + KV_STORE_AGENT_ROOT_KEY = 'cephadm_agent/root/key' + + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr = mgr + self.ssl_certs = SSLCerts() + self.server_port = 7150 + self.server_addr = self.mgr.get_mgr_ip() + + def 
configure_routes(self) -> None: + d = cherrypy.dispatch.RoutesDispatcher() + d.connect(name='host-data', route='/data/', + controller=self.host_data.POST, + conditions=dict(method=['POST'])) + cherrypy.tree.mount(None, '/', config={'/': {'request.dispatch': d}}) + + def configure_tls(self, server: Server) -> None: + old_cert = self.mgr.get_store(self.KV_STORE_AGENT_ROOT_CERT) + old_key = self.mgr.get_store(self.KV_STORE_AGENT_ROOT_KEY) + if old_cert and old_key: + self.ssl_certs.load_root_credentials(old_cert, old_key) + else: + self.ssl_certs.generate_root_cert(self.mgr.get_mgr_ip()) + self.mgr.set_store(self.KV_STORE_AGENT_ROOT_CERT, self.ssl_certs.get_root_cert()) + self.mgr.set_store(self.KV_STORE_AGENT_ROOT_KEY, self.ssl_certs.get_root_key()) + + host = self.mgr.get_hostname() + addr = self.mgr.get_mgr_ip() + server.ssl_certificate, server.ssl_private_key = self.ssl_certs.generate_cert_files(host, addr) + + def find_free_port(self) -> None: + max_port = self.server_port + 150 + while self.server_port <= max_port: + try: + test_port_allocation(self.server_addr, self.server_port) + self.host_data.socket_port = self.server_port + self.mgr.log.debug(f'Cephadm agent endpoint using {self.server_port}') + return + except PortAlreadyInUse: + self.server_port += 1 + self.mgr.log.error(f'Cephadm agent could not find free port in range {max_port - 150}-{max_port} and failed to start') + + def configure(self) -> None: + self.host_data = HostData(self.mgr, self.server_port, self.server_addr) + self.configure_tls(self.host_data) + self.configure_routes() + self.find_free_port() + + +class HostData(Server): + exposed = True + + def __init__(self, mgr: "CephadmOrchestrator", port: int, host: str): + self.mgr = mgr + super().__init__() + self.socket_port = port + self.socket_host = host + self.subscribe() + + def stop(self) -> None: + # we must call unsubscribe before stopping the server, + # otherwise the port is not released and we will get + # an exception when trying to restart it + self.unsubscribe() + super().stop() + + @cherrypy.tools.json_in() + @cherrypy.tools.json_out() + def POST(self) -> Dict[str, Any]: + data: Dict[str, Any] = cherrypy.request.json + results: Dict[str, Any] = {} + try: + self.check_request_fields(data) + except Exception as e: + results['result'] = f'Bad metadata: {e}' + self.mgr.log.warning(f'Received bad metadata from an agent: {e}') + else: + # if we got here, we've already verified the keyring of the agent. If + # host agent is reporting on is marked offline, it shouldn't be any more + self.mgr.offline_hosts_remove(data['host']) + results['result'] = self.handle_metadata(data) + return results + + def check_request_fields(self, data: Dict[str, Any]) -> None: + fields = '{' + ', '.join([key for key in data.keys()]) + '}' + if 'host' not in data: + raise Exception( + f'No host in metadata from agent ("host" field). Only received fields {fields}') + host = data['host'] + if host not in self.mgr.cache.get_hosts(): + raise Exception(f'Received metadata from agent on unknown hostname {host}') + if 'keyring' not in data: + raise Exception( + f'Agent on host {host} not reporting its keyring for validation ("keyring" field). Only received fields {fields}') + if host not in self.mgr.agent_cache.agent_keys: + raise Exception(f'No agent keyring stored for host {host}. 
Cannot verify agent') + if data['keyring'] != self.mgr.agent_cache.agent_keys[host]: + raise Exception(f'Got wrong keyring from agent on host {host}.') + if 'port' not in data: + raise Exception( + f'Agent on host {host} not reporting its listener port ("port" fields). Only received fields {fields}') + if 'ack' not in data: + raise Exception( + f'Agent on host {host} not reporting its counter value ("ack" field). Only received fields {fields}') + try: + int(data['ack']) + except Exception as e: + raise Exception( + f'Counter value from agent on host {host} could not be converted to an integer: {e}') + metadata_types = ['ls', 'networks', 'facts', 'volume'] + metadata_types_str = '{' + ', '.join(metadata_types) + '}' + if not all(item in data.keys() for item in metadata_types): + self.mgr.log.warning( + f'Agent on host {host} reported incomplete metadata. Not all of {metadata_types_str} were present. Received fields {fields}') + + def handle_metadata(self, data: Dict[str, Any]) -> str: + try: + host = data['host'] + self.mgr.agent_cache.agent_ports[host] = int(data['port']) + if host not in self.mgr.agent_cache.agent_counter: + self.mgr.agent_cache.agent_counter[host] = 1 + self.mgr.agent_helpers._request_agent_acks({host}) + res = f'Got metadata from agent on host {host} with no known counter entry. Starting counter at 1 and requesting new metadata' + self.mgr.log.debug(res) + return res + + # update timestamp of most recent agent update + self.mgr.agent_cache.agent_timestamp[host] = datetime_now() + + error_daemons_old = set([dd.name() for dd in self.mgr.cache.get_error_daemons()]) + daemon_count_old = len(self.mgr.cache.get_daemons_by_host(host)) + + up_to_date = False + + int_ack = int(data['ack']) + if int_ack == self.mgr.agent_cache.agent_counter[host]: + up_to_date = True + else: + # we got old counter value with message, inform agent of new timestamp + if not self.mgr.agent_cache.messaging_agent(host): + self.mgr.agent_helpers._request_agent_acks({host}) + self.mgr.log.debug( + f'Received old metadata from agent on host {host}. Requested up-to-date metadata.') + + if 'ls' in data and data['ls']: + self.mgr._process_ls_output(host, data['ls']) + self.mgr.update_failed_daemon_health_check() + if 'networks' in data and data['networks']: + self.mgr.cache.update_host_networks(host, data['networks']) + if 'facts' in data and data['facts']: + self.mgr.cache.update_host_facts(host, json.loads(data['facts'])) + if 'volume' in data and data['volume']: + ret = Devices.from_json(json.loads(data['volume'])) + self.mgr.cache.update_host_devices(host, ret.devices) + + if ( + error_daemons_old != set([dd.name() for dd in self.mgr.cache.get_error_daemons()]) + or daemon_count_old != len(self.mgr.cache.get_daemons_by_host(host)) + ): + self.mgr.log.debug( + f'Change detected in state of daemons from {host} agent metadata. Kicking serve loop') + self.mgr._kick_serve_loop() + + if up_to_date and ('ls' in data and data['ls']): + was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date() + self.mgr.cache.metadata_up_to_date[host] = True + if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date(): + self.mgr.log.debug( + 'New metadata from agent has made all hosts up to date. Kicking serve loop') + self.mgr._kick_serve_loop() + self.mgr.log.debug( + f'Received up-to-date metadata from agent on host {host}.') + + self.mgr.agent_cache.save_agent(host) + return 'Successfully processed metadata.' 
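+        # any exception raised while processing agent metadata is handled
+        # below and reported back to the agent as the result string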
+ + except Exception as e: + err_str = f'Failed to update metadata with metadata from agent on host {host}: {e}' + self.mgr.log.warning(err_str) + return err_str + + +class AgentMessageThread(threading.Thread): + def __init__(self, host: str, port: int, data: Dict[Any, Any], mgr: "CephadmOrchestrator", daemon_spec: Optional[CephadmDaemonDeploySpec] = None) -> None: + self.mgr = mgr + self.agent = mgr.http_server.agent + self.host = host + self.addr = self.mgr.inventory.get_addr(host) if host in self.mgr.inventory else host + self.port = port + self.data: str = json.dumps(data) + self.daemon_spec: Optional[CephadmDaemonDeploySpec] = daemon_spec + super().__init__(target=self.run) + + def run(self) -> None: + self.mgr.log.debug(f'Sending message to agent on host {self.host}') + self.mgr.agent_cache.sending_agent_message[self.host] = True + try: + assert self.agent + root_cert = self.agent.ssl_certs.get_root_cert() + root_cert_tmp = tempfile.NamedTemporaryFile() + root_cert_tmp.write(root_cert.encode('utf-8')) + root_cert_tmp.flush() + root_cert_fname = root_cert_tmp.name + + cert, key = self.agent.ssl_certs.generate_cert( + self.mgr.get_hostname(), self.mgr.get_mgr_ip()) + + cert_tmp = tempfile.NamedTemporaryFile() + cert_tmp.write(cert.encode('utf-8')) + cert_tmp.flush() + cert_fname = cert_tmp.name + + key_tmp = tempfile.NamedTemporaryFile() + key_tmp.write(key.encode('utf-8')) + key_tmp.flush() + key_fname = key_tmp.name + + ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=root_cert_fname) + ssl_ctx.verify_mode = ssl.CERT_REQUIRED + ssl_ctx.check_hostname = True + ssl_ctx.load_cert_chain(cert_fname, key_fname) + except Exception as e: + self.mgr.log.error(f'Failed to get certs for connecting to agent: {e}') + self.mgr.agent_cache.sending_agent_message[self.host] = False + return + try: + bytes_len: str = str(len(self.data.encode('utf-8'))) + if len(bytes_len.encode('utf-8')) > 10: + raise Exception( + f'Message is too big to send to agent. Message size is {bytes_len} bytes!') + while len(bytes_len.encode('utf-8')) < 10: + bytes_len = '0' + bytes_len + except Exception as e: + self.mgr.log.error(f'Failed to get length of json payload: {e}') + self.mgr.agent_cache.sending_agent_message[self.host] = False + return + for retry_wait in [3, 5]: + try: + agent_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + secure_agent_socket = ssl_ctx.wrap_socket(agent_socket, server_hostname=self.addr) + secure_agent_socket.connect((self.addr, self.port)) + msg = (bytes_len + self.data) + secure_agent_socket.sendall(msg.encode('utf-8')) + agent_response = secure_agent_socket.recv(1024).decode() + self.mgr.log.debug(f'Received "{agent_response}" from agent on host {self.host}') + if self.daemon_spec: + self.mgr.agent_cache.agent_config_successfully_delivered(self.daemon_spec) + self.mgr.agent_cache.sending_agent_message[self.host] = False + return + except ConnectionError as e: + # if it's a connection error, possibly try to connect again. + # We could have just deployed agent and it might not be ready + self.mgr.log.debug( + f'Retrying connection to agent on {self.host} in {str(retry_wait)} seconds. Connection failed with: {e}') + time.sleep(retry_wait) + except Exception as e: + # if it's not a connection error, something has gone wrong. Give up. 
+ self.mgr.log.error(f'Failed to contact agent on host {self.host}: {e}') + self.mgr.agent_cache.sending_agent_message[self.host] = False + return + self.mgr.log.error(f'Could not connect to agent on host {self.host}') + self.mgr.agent_cache.sending_agent_message[self.host] = False + return + + +class CephadmAgentHelpers: + def __init__(self, mgr: "CephadmOrchestrator"): + self.mgr: "CephadmOrchestrator" = mgr + self.agent = mgr.http_server.agent + + def _request_agent_acks(self, hosts: Set[str], increment: bool = False, daemon_spec: Optional[CephadmDaemonDeploySpec] = None) -> None: + for host in hosts: + if increment: + self.mgr.cache.metadata_up_to_date[host] = False + if host not in self.mgr.agent_cache.agent_counter: + self.mgr.agent_cache.agent_counter[host] = 1 + elif increment: + self.mgr.agent_cache.agent_counter[host] = self.mgr.agent_cache.agent_counter[host] + 1 + payload: Dict[str, Any] = {'counter': self.mgr.agent_cache.agent_counter[host]} + if daemon_spec: + payload['config'] = daemon_spec.final_config + message_thread = AgentMessageThread( + host, self.mgr.agent_cache.agent_ports[host], payload, self.mgr, daemon_spec) + message_thread.start() + + def _request_ack_all_not_up_to_date(self) -> None: + self.mgr.agent_helpers._request_agent_acks( + set([h for h in self.mgr.cache.get_hosts() if + (not self.mgr.cache.host_metadata_up_to_date(h) + and h in self.mgr.agent_cache.agent_ports and not self.mgr.agent_cache.messaging_agent(h))])) + + def _agent_down(self, host: str) -> bool: + # if host is draining or drained (has _no_schedule label) there should not + # be an agent deployed there and therefore we should return False + if self.mgr.cache.is_host_draining(host): + return False + # if we haven't deployed an agent on the host yet, don't say an agent is down + if not self.mgr.cache.get_daemons_by_type('agent', host=host): + return False + # if we don't have a timestamp, it's likely because of a mgr fail over. + # just set the timestamp to now. However, if host was offline before, we + # should not allow creating a new timestamp to cause it to be marked online + if host not in self.mgr.agent_cache.agent_timestamp: + if host in self.mgr.offline_hosts: + return False + self.mgr.agent_cache.agent_timestamp[host] = datetime_now() + # agent hasn't reported in down multiplier * it's refresh rate. Something is likely wrong with it. + down_mult: float = max(self.mgr.agent_down_multiplier, 1.5) + time_diff = datetime_now() - self.mgr.agent_cache.agent_timestamp[host] + if time_diff.total_seconds() > down_mult * float(self.mgr.agent_refresh_rate): + return True + return False + + def _update_agent_down_healthcheck(self, down_agent_hosts: List[str]) -> None: + self.mgr.remove_health_warning('CEPHADM_AGENT_DOWN') + if down_agent_hosts: + detail: List[str] = [] + down_mult: float = max(self.mgr.agent_down_multiplier, 1.5) + for agent in down_agent_hosts: + detail.append((f'Cephadm agent on host {agent} has not reported in ' + f'{down_mult * self.mgr.agent_refresh_rate} seconds. Agent is assumed ' + 'down and host may be offline.')) + for dd in [d for d in self.mgr.cache.get_daemons_by_type('agent') if d.hostname in down_agent_hosts]: + dd.status = DaemonDescriptionStatus.error + self.mgr.set_health_warning( + 'CEPHADM_AGENT_DOWN', + summary='%d Cephadm Agent(s) are not reporting. 
Hosts may be offline' % ( + len(down_agent_hosts)), + count=len(down_agent_hosts), + detail=detail, + ) + + # this function probably seems very unnecessary, but it makes it considerably easier + # to get the unit tests working. All unit tests that check which daemons were deployed + # or services setup would have to be individually changed to expect an agent service or + # daemons, OR we can put this in its own function then mock the function + def _apply_agent(self) -> None: + spec = ServiceSpec( + service_type='agent', + placement=PlacementSpec(host_pattern='*') + ) + self.mgr.spec_store.save(spec) + + def _handle_use_agent_setting(self) -> bool: + need_apply = False + if self.mgr.use_agent: + # on the off chance there are still agents hanging around from + # when we turned the config option off, we need to redeploy them + # we can tell they're in that state if we don't have a keyring for + # them in the host cache + for agent in self.mgr.cache.get_daemons_by_service('agent'): + if agent.hostname not in self.mgr.agent_cache.agent_keys: + self.mgr._schedule_daemon_action(agent.name(), 'redeploy') + if 'agent' not in self.mgr.spec_store: + self.mgr.agent_helpers._apply_agent() + need_apply = True + else: + if 'agent' in self.mgr.spec_store: + self.mgr.spec_store.rm('agent') + need_apply = True + self.mgr.agent_cache.agent_counter = {} + self.mgr.agent_cache.agent_timestamp = {} + self.mgr.agent_cache.agent_keys = {} + self.mgr.agent_cache.agent_ports = {} + return need_apply + + def _check_agent(self, host: str) -> bool: + down = False + try: + assert self.agent + assert self.agent.ssl_certs.get_root_cert() + except Exception: + self.mgr.log.debug( + f'Delaying checking agent on {host} until cephadm endpoint finished creating root cert') + return down + if self.mgr.agent_helpers._agent_down(host): + down = True + try: + agent = self.mgr.cache.get_daemons_by_type('agent', host=host)[0] + assert agent.daemon_id is not None + assert agent.hostname is not None + except Exception as e: + self.mgr.log.debug( + f'Could not retrieve agent on host {host} from daemon cache: {e}') + return down + try: + spec = self.mgr.spec_store.active_specs.get('agent', None) + deps = self.mgr._calc_daemon_deps(spec, 'agent', agent.daemon_id) + last_deps, last_config = self.mgr.agent_cache.get_agent_last_config_deps(host) + if not last_config or last_deps != deps: + # if root cert is the dep that changed, we must use ssh to reconfig + # so it's necessary to check this one specifically + root_cert_match = False + try: + root_cert = self.agent.ssl_certs.get_root_cert() + if last_deps and root_cert in last_deps: + root_cert_match = True + except Exception: + pass + daemon_spec = CephadmDaemonDeploySpec.from_daemon_description(agent) + # we need to know the agent port to try to reconfig w/ http + # otherwise there is no choice but a full ssh reconfig + if host in self.mgr.agent_cache.agent_ports and root_cert_match and not down: + daemon_spec = self.mgr.cephadm_services[daemon_type_to_service( + daemon_spec.daemon_type)].prepare_create(daemon_spec) + self.mgr.agent_helpers._request_agent_acks( + hosts={daemon_spec.host}, + increment=True, + daemon_spec=daemon_spec, + ) + else: + self.mgr._daemon_action(daemon_spec, action='reconfig') + return down + except Exception as e: + self.mgr.log.debug( + f'Agent on host {host} not ready to have config and deps checked: {e}') + action = self.mgr.cache.get_scheduled_daemon_action(agent.hostname, agent.name()) + if action: + try: + daemon_spec = 
CephadmDaemonDeploySpec.from_daemon_description(agent) + self.mgr._daemon_action(daemon_spec, action=action) + self.mgr.cache.rm_scheduled_daemon_action(agent.hostname, agent.name()) + except Exception as e: + self.mgr.log.debug( + f'Agent on host {host} not ready to {action}: {e}') + return down diff --git a/src/pybind/mgr/cephadm/autotune.py b/src/pybind/mgr/cephadm/autotune.py new file mode 100644 index 000000000..51c931cba --- /dev/null +++ b/src/pybind/mgr/cephadm/autotune.py @@ -0,0 +1,54 @@ +import logging +from typing import List, Optional, Callable, Any, Tuple + +from orchestrator._interface import DaemonDescription + +logger = logging.getLogger(__name__) + + +class MemoryAutotuner(object): + + min_size_by_type = { + 'mds': 4096 * 1048576, + 'mgr': 4096 * 1048576, + 'mon': 1024 * 1048576, + 'crash': 128 * 1048576, + 'keepalived': 128 * 1048576, + 'haproxy': 128 * 1048576, + } + default_size = 1024 * 1048576 + + def __init__( + self, + daemons: List[DaemonDescription], + config_get: Callable[[str, str], Any], + total_mem: int, + ): + self.daemons = daemons + self.config_get = config_get + self.total_mem = total_mem + + def tune(self) -> Tuple[Optional[int], List[str]]: + tuned_osds: List[str] = [] + total = self.total_mem + for d in self.daemons: + if d.daemon_type == 'mds': + total -= self.config_get(d.name(), 'mds_cache_memory_limit') + continue + if d.daemon_type != 'osd': + assert d.daemon_type + total -= max( + self.min_size_by_type.get(d.daemon_type, self.default_size), + d.memory_usage or 0 + ) + continue + if not self.config_get(d.name(), 'osd_memory_target_autotune'): + total -= self.config_get(d.name(), 'osd_memory_target') + continue + tuned_osds.append(d.name()) + if total < 0: + return None, [] + if not tuned_osds: + return None, [] + per = total // len(tuned_osds) + return int(per), tuned_osds diff --git a/src/pybind/mgr/cephadm/ceph.repo b/src/pybind/mgr/cephadm/ceph.repo new file mode 100644 index 000000000..6f710e7ce --- /dev/null +++ b/src/pybind/mgr/cephadm/ceph.repo @@ -0,0 +1,23 @@ +[ceph] +name=Ceph packages for $basearch +baseurl=https://download.ceph.com/rpm-mimic/el7/$basearch +enabled=1 +priority=2 +gpgcheck=1 +gpgkey=https://download.ceph.com/keys/release.asc + +[ceph-noarch] +name=Ceph noarch packages +baseurl=https://download.ceph.com/rpm-mimic/el7/noarch +enabled=1 +priority=2 +gpgcheck=1 +gpgkey=https://download.ceph.com/keys/release.asc + +[ceph-source] +name=Ceph source packages +baseurl=https://download.ceph.com/rpm-mimic/el7/SRPMS +enabled=0 +priority=2 +gpgcheck=1 +gpgkey=https://download.ceph.com/keys/release.asc diff --git a/src/pybind/mgr/cephadm/configchecks.py b/src/pybind/mgr/cephadm/configchecks.py new file mode 100644 index 000000000..b9dcb18f4 --- /dev/null +++ b/src/pybind/mgr/cephadm/configchecks.py @@ -0,0 +1,705 @@ +import json +import ipaddress +import logging + +from mgr_module import ServiceInfoT + +from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast, Tuple, Callable + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + +logger = logging.getLogger(__name__) + + +class HostFacts: + + def __init__(self) -> None: + self.arch: Optional[str] = None + self.bios_date: Optional[str] = None + self.bios_version: Optional[str] = None + self.cpu_cores: Optional[int] = None + self.cpu_count: Optional[int] = None + self.cpu_load: Optional[Dict[str, float]] = None + self.cpu_model: Optional[str] = None + self.cpu_threads: Optional[int] = None + self.flash_capacity: Optional[str] = None + self.flash_capacity_bytes: 
Optional[int] = None + self.flash_count: Optional[int] = None + self.flash_list: Optional[List[Dict[str, Any]]] = None + self.hdd_capacity: Optional[str] = None + self.hdd_capacity_bytes: Optional[int] = None + self.hdd_count: Optional[int] = None + self.hdd_list: Optional[List[Dict[str, Any]]] = None + self.hostname: Optional[str] = None + self.interfaces: Optional[Dict[str, Dict[str, Any]]] = None + self.kernel: Optional[str] = None + self.kernel_parameters: Optional[Dict[str, Any]] = None + self.kernel_security: Optional[Dict[str, str]] = None + self.memory_available_kb: Optional[int] = None + self.memory_free_kb: Optional[int] = None + self.memory_total_kb: Optional[int] = None + self.model: Optional[str] = None + self.nic_count: Optional[int] = None + self.operating_system: Optional[str] = None + self.subscribed: Optional[str] = None + self.system_uptime: Optional[float] = None + self.timestamp: Optional[float] = None + self.vendor: Optional[str] = None + self._valid = False + + def load_facts(self, json_data: Dict[str, Any]) -> None: + + if isinstance(json_data, dict): + keys = json_data.keys() + if all([k in keys for k in self.__dict__ if not k.startswith('_')]): + self._valid = True + for k in json_data.keys(): + if hasattr(self, k): + setattr(self, k, json_data[k]) + else: + self._valid = False + else: + self._valid = False + + def subnet_to_nic(self, subnet: str) -> Optional[str]: + ip_version = ipaddress.ip_network(subnet).version + logger.debug(f"subnet {subnet} is IP version {ip_version}") + interfaces = cast(Dict[str, Dict[str, Any]], self.interfaces) + nic = None + for iface in interfaces.keys(): + addr = '' + if ip_version == 4: + addr = interfaces[iface].get('ipv4_address', '') + else: + addr = interfaces[iface].get('ipv6_address', '') + if addr: + a = addr.split('/')[0] + if ipaddress.ip_address(a) in ipaddress.ip_network(subnet): + nic = iface + break + return nic + + +class SubnetLookup: + def __init__(self, subnet: str, hostname: str, mtu: str, speed: str): + self.subnet = subnet + self.mtu_map = { + mtu: [hostname] + } + self.speed_map = { + speed: [hostname] + } + + @ property + def host_list(self) -> List[str]: + hosts = [] + for mtu in self.mtu_map: + hosts.extend(self.mtu_map.get(mtu, [])) + return hosts + + def update(self, hostname: str, mtu: str, speed: str) -> None: + if mtu in self.mtu_map and hostname not in self.mtu_map[mtu]: + self.mtu_map[mtu].append(hostname) + else: + self.mtu_map[mtu] = [hostname] + + if speed in self.speed_map and hostname not in self.speed_map[speed]: + self.speed_map[speed].append(hostname) + else: + self.speed_map[speed] = [hostname] + + def __repr__(self) -> str: + return json.dumps({ + "subnet": self.subnet, + "mtu_map": self.mtu_map, + "speed_map": self.speed_map + }) + + +class CephadmCheckDefinition: + def __init__(self, mgr: "CephadmOrchestrator", healthcheck_name: str, description: str, name: str, func: Callable) -> None: + self.mgr = mgr + self.log = logger + self.healthcheck_name = healthcheck_name + self.description = description + self.name = name + self.func = func + + @property + def status(self) -> str: + check_states: Dict[str, str] = {} + # Issuing a get each time, since the value could be set at the CLI + raw_states = self.mgr.get_store('config_checks') + if not raw_states: + self.log.error( + "config_checks setting is not defined - unable to determine healthcheck state") + return "Unknown" + + try: + check_states = json.loads(raw_states) + except json.JSONDecodeError: + self.log.error("Unable to serialize the 
config_checks settings to JSON") + return "Unavailable" + + return check_states.get(self.name, 'Missing') + + def to_json(self) -> Dict[str, Any]: + return { + "healthcheck_name": self.healthcheck_name, + "description": self.description, + "name": self.name, + "status": self.status, + "valid": True if self.func else False + } + + +class CephadmConfigChecks: + def __init__(self, mgr: "CephadmOrchestrator"): + self.mgr: "CephadmOrchestrator" = mgr + self.health_checks: List[CephadmCheckDefinition] = [ + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_KERNEL_LSM", + "checks SELINUX/Apparmor profiles are consistent across cluster hosts", + "kernel_security", + self._check_kernel_lsm), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_SUBSCRIPTION", + "checks subscription states are consistent for all cluster hosts", + "os_subscription", + self._check_subscription), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_PUBLIC_MEMBERSHIP", + "check that all hosts have a NIC on the Ceph public_network", + "public_network", + self._check_public_network), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_MTU", + "check that OSD hosts share a common MTU setting", + "osd_mtu_size", + self._check_osd_mtu), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_LINKSPEED", + "check that OSD hosts share a common linkspeed", + "osd_linkspeed", + self._check_osd_linkspeed), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_NETWORK_MISSING", + "checks that the cluster/public networks defined exist on the Ceph hosts", + "network_missing", + self._check_network_missing), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_CEPH_RELEASE", + "check for Ceph version consistency - ceph daemons should be on the same release (unless upgrade is active)", + "ceph_release", + self._check_release_parity), + CephadmCheckDefinition(mgr, "CEPHADM_CHECK_KERNEL_VERSION", + "checks that the MAJ.MIN of the kernel on Ceph hosts is consistent", + "kernel_version", + self._check_kernel_version), + ] + self.log = logger + self.host_facts: Dict[str, HostFacts] = {} + self.subnet_lookup: Dict[str, SubnetLookup] = {} # subnet CIDR -> SubnetLookup Object + self.lsm_to_host: Dict[str, List[str]] = {} + self.subscribed: Dict[str, List[str]] = { + "yes": [], + "no": [], + "unknown": [], + } + self.host_to_role: Dict[str, List[str]] = {} + self.kernel_to_hosts: Dict[str, List[str]] = {} + + self.public_network_list: List[str] = [] + self.cluster_network_list: List[str] = [] + self.health_check_raised = False + self.active_checks: List[str] = [] # checks enabled and executed + self.skipped_checks: List[str] = [] # checks enabled, but skipped due to a pre-req failure + + raw_checks = self.mgr.get_store('config_checks') + if not raw_checks: + # doesn't exist, so seed the checks + self.seed_config_checks() + else: + # setting is there, so ensure there is an entry for each of the checks that + # this module supports (account for upgrades/changes) + try: + config_checks = json.loads(raw_checks) + except json.JSONDecodeError: + self.log.error("Unable to serialize config_checks config. 
Reset to defaults") + self.seed_config_checks() + else: + # Ensure the config_checks setting is consistent with this module + from_config = set(config_checks.keys()) + from_module = set([c.name for c in self.health_checks]) + old_checks = from_config.difference(from_module) + new_checks = from_module.difference(from_config) + + if old_checks: + self.log.debug(f"old checks being removed from config_checks: {old_checks}") + for i in old_checks: + del config_checks[i] + if new_checks: + self.log.debug(f"new checks being added to config_checks: {new_checks}") + for i in new_checks: + config_checks[i] = 'enabled' + + if old_checks or new_checks: + self.log.info( + f"config_checks updated: {len(old_checks)} removed, {len(new_checks)} added") + self.mgr.set_store('config_checks', json.dumps(config_checks)) + else: + self.log.debug("config_checks match module definition") + + def lookup_check(self, key_value: str, key_name: str = 'name') -> Optional[CephadmCheckDefinition]: + + for c in self.health_checks: + if getattr(c, key_name) == key_value: + return c + return None + + @property + def defined_checks(self) -> int: + return len(self.health_checks) + + @property + def active_checks_count(self) -> int: + return len(self.active_checks) + + def seed_config_checks(self) -> None: + defaults = {check.name: 'enabled' for check in self.health_checks} + self.mgr.set_store('config_checks', json.dumps(defaults)) + + @property + def skipped_checks_count(self) -> int: + return len(self.skipped_checks) + + def to_json(self) -> List[Dict[str, str]]: + return [check.to_json() for check in self.health_checks] + + def load_network_config(self) -> None: + ret, out, _err = self.mgr.check_mon_command({ + 'prefix': 'config dump', + 'format': 'json' + }) + assert ret == 0 + js = json.loads(out) + for item in js: + if item['name'] == "cluster_network": + self.cluster_network_list = item['value'].strip().split(',') + if item['name'] == "public_network": + self.public_network_list = item['value'].strip().split(',') + + self.log.debug(f"public networks {self.public_network_list}") + self.log.debug(f"cluster networks {self.cluster_network_list}") + + def _update_subnet(self, subnet: str, hostname: str, nic: Dict[str, Any]) -> None: + mtu = nic.get('mtu', None) + speed = nic.get('speed', None) + if not mtu or not speed: + return + + this_subnet = self.subnet_lookup.get(subnet, None) + if this_subnet: + this_subnet.update(hostname, mtu, speed) + else: + self.subnet_lookup[subnet] = SubnetLookup(subnet, hostname, mtu, speed) + + def _update_subnet_lookups(self, hostname: str, devname: str, nic: Dict[str, Any]) -> None: + if nic['ipv4_address']: + try: + iface4 = ipaddress.IPv4Interface(nic['ipv4_address']) + subnet = str(iface4.network) + except ipaddress.AddressValueError as e: + self.log.exception(f"Invalid network on {hostname}, interface {devname} : {str(e)}") + else: + self._update_subnet(subnet, hostname, nic) + + if nic['ipv6_address']: + try: + iface6 = ipaddress.IPv6Interface(nic['ipv6_address']) + subnet = str(iface6.network) + except ipaddress.AddressValueError as e: + self.log.exception(f"Invalid network on {hostname}, interface {devname} : {str(e)}") + else: + self._update_subnet(subnet, hostname, nic) + + def hosts_with_role(self, role: str) -> List[str]: + host_list = [] + for hostname, roles in self.host_to_role.items(): + if role in roles: + host_list.append(hostname) + return host_list + + def reset(self) -> None: + self.subnet_lookup.clear() + self.lsm_to_host.clear() + self.subscribed['yes'] = [] + 
self.subscribed['no'] = [] + self.subscribed['unknown'] = [] + self.host_to_role.clear() + self.kernel_to_hosts.clear() + + def _get_majority(self, data: Dict[str, List[str]]) -> Tuple[str, int]: + assert isinstance(data, dict) + + majority_key = '' + majority_count = 0 + for key in data: + if len(data[key]) > majority_count: + majority_count = len(data[key]) + majority_key = key + return majority_key, majority_count + + def get_ceph_metadata(self) -> Dict[str, Optional[Dict[str, str]]]: + """Build a map of service -> service metadata""" + service_map: Dict[str, Optional[Dict[str, str]]] = {} + + for server in self.mgr.list_servers(): + for service in cast(List[ServiceInfoT], server.get('services', [])): + if service: + service_map.update( + { + f"{service['type']}.{service['id']}": + self.mgr.get_metadata(service['type'], service['id']) + } + ) + return service_map + + def _check_kernel_lsm(self) -> None: + if len(self.lsm_to_host.keys()) > 1: + + majority_hosts_ptr, majority_hosts_count = self._get_majority(self.lsm_to_host) + lsm_copy = self.lsm_to_host.copy() + del lsm_copy[majority_hosts_ptr] + details = [] + for lsm_key in lsm_copy.keys(): + for host in lsm_copy[lsm_key]: + details.append( + f"{host} has inconsistent KSM settings compared to the " + f"majority of hosts({majority_hosts_count}) in the cluster") + host_sfx = 's' if len(details) > 1 else '' + self.mgr.health_checks['CEPHADM_CHECK_KERNEL_LSM'] = { + 'severity': 'warning', + 'summary': f"Kernel Security Module (SELinux/AppArmor) is inconsistent for " + f"{len(details)} host{host_sfx}", + 'count': len(details), + 'detail': details, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_KERNEL_LSM', None) + + def _check_subscription(self) -> None: + if len(self.subscribed['yes']) > 0 and len(self.subscribed['no']) > 0: + # inconsistent subscription states - CEPHADM_CHECK_SUBSCRIPTION + details = [] + for host in self.subscribed['no']: + details.append(f"{host} does not have an active subscription") + self.mgr.health_checks['CEPHADM_CHECK_SUBSCRIPTION'] = { + 'severity': 'warning', + 'summary': f"Support subscriptions inactive on {len(details)} host(s)" + f"({len(self.subscribed['yes'])} subscriptions active)", + 'count': len(details), + 'detail': details, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_SUBSCRIPTION', None) + + def _check_public_network(self) -> None: + hosts_remaining: List[str] = list(self.mgr.cache.facts.keys()) + hosts_removed: List[str] = [] + self.log.debug(f"checking public network membership for: {hosts_remaining}") + + for p_net in self.public_network_list: + self.log.debug(f"checking network {p_net}") + subnet_data = self.subnet_lookup.get(p_net, None) + self.log.debug(f"subnet data - {subnet_data}") + + if subnet_data: + hosts_in_subnet = subnet_data.host_list + for host in hosts_in_subnet: + if host in hosts_remaining: + hosts_remaining.remove(host) + hosts_removed.append(host) + else: + if host not in hosts_removed: + self.log.debug(f"host={host}, subnet={p_net}") + self.log.exception( + "Host listed for a subnet but not present in the host facts?") + + # Ideally all hosts will have been removed since they have an IP on at least + # one of the public networks + if hosts_remaining: + if len(hosts_remaining) != len(self.mgr.cache.facts): + # public network is visible on some hosts + details = [ + f"{host} does not have an interface on any public network" for host in hosts_remaining] + + 
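+                # Shape note (a sketch derived from the assignments nearby, not new
+                # behaviour): every CEPHADM_CHECK_* health check this module raises
+                # uses the same dict layout, i.e.
+                #   {'severity': 'warning', 'summary': '...', 'count': len(details), 'detail': details}
+                # The CEPHADM_CHECK_PUBLIC_MEMBERSHIP entry below is one instance.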
self.mgr.health_checks['CEPHADM_CHECK_PUBLIC_MEMBERSHIP'] = { + 'severity': 'warning', + 'summary': f"Public network(s) is not directly accessible from {len(hosts_remaining)} " + "cluster hosts", + 'count': len(details), + 'detail': details, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_PUBLIC_MEMBERSHIP', None) + + def _check_osd_mtu(self) -> None: + osd_hosts = set(self.hosts_with_role('osd')) + osd_network_list = self.cluster_network_list or self.public_network_list + mtu_errors: List[str] = [] + + for osd_net in osd_network_list: + subnet_data = self.subnet_lookup.get(osd_net, None) + + if subnet_data: + + self.log.debug(f"processing mtu map : {json.dumps(subnet_data.mtu_map)}") + mtu_count = {} + max_hosts = 0 + mtu_ptr = '' + diffs = {} + for mtu, host_list in subnet_data.mtu_map.items(): + mtu_hosts = set(host_list) + mtu_count[mtu] = len(mtu_hosts) + errors = osd_hosts.difference(mtu_hosts) + if errors: + diffs[mtu] = errors + if len(errors) > max_hosts: + mtu_ptr = mtu + + if diffs: + self.log.debug("MTU problems detected") + self.log.debug(f"most hosts using {mtu_ptr}") + mtu_copy = subnet_data.mtu_map.copy() + del mtu_copy[mtu_ptr] + for bad_mtu in mtu_copy: + for h in mtu_copy[bad_mtu]: + host = HostFacts() + host.load_facts(self.mgr.cache.facts[h]) + mtu_errors.append( + f"host {h}({host.subnet_to_nic(osd_net)}) is using MTU " + f"{bad_mtu} on {osd_net}, NICs on other hosts use {mtu_ptr}") + + if mtu_errors: + self.mgr.health_checks['CEPHADM_CHECK_MTU'] = { + 'severity': 'warning', + 'summary': f"MTU setting inconsistent on osd network NICs on {len(mtu_errors)} host(s)", + 'count': len(mtu_errors), + 'detail': mtu_errors, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_MTU', None) + + def _check_osd_linkspeed(self) -> None: + osd_hosts = set(self.hosts_with_role('osd')) + osd_network_list = self.cluster_network_list or self.public_network_list + + linkspeed_errors = [] + + for osd_net in osd_network_list: + subnet_data = self.subnet_lookup.get(osd_net, None) + + if subnet_data: + + self.log.debug(f"processing subnet : {subnet_data}") + + speed_count = {} + max_hosts = 0 + speed_ptr = '' + diffs = {} + for speed, host_list in subnet_data.speed_map.items(): + speed_hosts = set(host_list) + speed_count[speed] = len(speed_hosts) + errors = osd_hosts.difference(speed_hosts) + if errors: + diffs[speed] = errors + if len(errors) > max_hosts: + speed_ptr = speed + + if diffs: + self.log.debug("linkspeed issue(s) detected") + self.log.debug(f"most hosts using {speed_ptr}") + speed_copy = subnet_data.speed_map.copy() + del speed_copy[speed_ptr] + for bad_speed in speed_copy: + if bad_speed > speed_ptr: + # skip speed is better than most...it can stay! 
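+                            # e.g. (illustrative values, not taken from a real cluster): with
+                            # speed_map {25000: ['h1', 'h2'], 40000: ['h3']} and a reference
+                            # speed of 25000, h3's 40000 link is faster than the reference,
+                            # so it is skipped here rather than reported as an error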
+ continue + for h in speed_copy[bad_speed]: + host = HostFacts() + host.load_facts(self.mgr.cache.facts[h]) + linkspeed_errors.append( + f"host {h}({host.subnet_to_nic(osd_net)}) has linkspeed of " + f"{bad_speed} on {osd_net}, NICs on other hosts use {speed_ptr}") + + if linkspeed_errors: + self.mgr.health_checks['CEPHADM_CHECK_LINKSPEED'] = { + 'severity': 'warning', + 'summary': "Link speed is inconsistent on osd network NICs for " + f"{len(linkspeed_errors)} host(s)", + 'count': len(linkspeed_errors), + 'detail': linkspeed_errors, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_LINKSPEED', None) + + def _check_network_missing(self) -> None: + all_networks = self.public_network_list.copy() + all_networks.extend(self.cluster_network_list) + + missing_networks = [] + for subnet in all_networks: + subnet_data = self.subnet_lookup.get(subnet, None) + + if not subnet_data: + missing_networks.append(f"{subnet} not found on any host in the cluster") + self.log.warning( + f"Network {subnet} has been defined, but is not present on any host") + + if missing_networks: + net_sfx = 's' if len(missing_networks) > 1 else '' + self.mgr.health_checks['CEPHADM_CHECK_NETWORK_MISSING'] = { + 'severity': 'warning', + 'summary': f"Public/cluster network{net_sfx} defined, but can not be found on " + "any host", + 'count': len(missing_networks), + 'detail': missing_networks, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_NETWORK_MISSING', None) + + def _check_release_parity(self) -> None: + upgrade_status = self.mgr.upgrade.upgrade_status() + if upgrade_status.in_progress: + # skip version consistency checks during an upgrade cycle + self.skipped_checks.append('ceph_release') + return + + services = self.get_ceph_metadata() + self.log.debug(json.dumps(services)) + version_to_svcs: Dict[str, List[str]] = {} + + for svc in services: + if services[svc]: + metadata = cast(Dict[str, str], services[svc]) + v = metadata.get('ceph_release', '') + if v in version_to_svcs: + version_to_svcs[v].append(svc) + else: + version_to_svcs[v] = [svc] + + if len(version_to_svcs) > 1: + majority_ptr, _majority_count = self._get_majority(version_to_svcs) + ver_copy = version_to_svcs.copy() + del ver_copy[majority_ptr] + details = [] + for v in ver_copy: + for svc in ver_copy[v]: + details.append( + f"{svc} is running {v} (majority of cluster is using {majority_ptr})") + + self.mgr.health_checks['CEPHADM_CHECK_CEPH_RELEASE'] = { + 'severity': 'warning', + 'summary': 'Ceph cluster running mixed ceph releases', + 'count': len(details), + 'detail': details, + } + self.health_check_raised = True + self.log.warning( + f"running with {len(version_to_svcs)} different ceph releases within this cluster") + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_CEPH_RELEASE', None) + + def _check_kernel_version(self) -> None: + if len(self.kernel_to_hosts.keys()) > 1: + majority_hosts_ptr, majority_hosts_count = self._get_majority(self.kernel_to_hosts) + kver_copy = self.kernel_to_hosts.copy() + del kver_copy[majority_hosts_ptr] + details = [] + for k in kver_copy: + for h in kver_copy[k]: + details.append( + f"host {h} running kernel {k}, majority of hosts({majority_hosts_count}) " + f"running {majority_hosts_ptr}") + + self.log.warning("mixed kernel versions detected") + self.mgr.health_checks['CEPHADM_CHECK_KERNEL_VERSION'] = { + 'severity': 'warning', + 'summary': f"{len(details)} host(s) running different kernel versions", + 'count': len(details), + 
'detail': details, + } + self.health_check_raised = True + else: + self.mgr.health_checks.pop('CEPHADM_CHECK_KERNEL_VERSION', None) + + def _process_hosts(self) -> None: + self.log.debug(f"processing data from {len(self.mgr.cache.facts)} hosts") + for hostname in self.mgr.cache.facts: + host = HostFacts() + host.load_facts(self.mgr.cache.facts[hostname]) + if not host._valid: + self.log.warning(f"skipping {hostname} - incompatible host facts") + continue + + kernel_lsm = cast(Dict[str, str], host.kernel_security) + lsm_desc = kernel_lsm.get('description', '') + if lsm_desc: + if lsm_desc in self.lsm_to_host: + self.lsm_to_host[lsm_desc].append(hostname) + else: + self.lsm_to_host[lsm_desc] = [hostname] + + subscription_state = host.subscribed.lower() if host.subscribed else None + if subscription_state: + self.subscribed[subscription_state].append(hostname) + + interfaces = cast(Dict[str, Dict[str, Any]], host.interfaces) + for name in interfaces.keys(): + if name in ['lo']: + continue + self._update_subnet_lookups(hostname, name, interfaces[name]) + + if host.kernel: + kernel_maj_min = '.'.join(host.kernel.split('.')[0:2]) + if kernel_maj_min in self.kernel_to_hosts: + self.kernel_to_hosts[kernel_maj_min].append(hostname) + else: + self.kernel_to_hosts[kernel_maj_min] = [hostname] + else: + self.log.warning(f"Gathered facts for host {hostname} are missing kernel information") + + # NOTE: if DaemonDescription had systemd enabled state, we could check for systemd 'tampering' + self.host_to_role[hostname] = list(self.mgr.cache.get_daemon_types(hostname)) + + def run_checks(self) -> None: + checks_enabled = self.mgr.get_module_option('config_checks_enabled') + if checks_enabled is not True: + return + + self.reset() + + check_config: Dict[str, str] = {} + checks_raw: Optional[str] = self.mgr.get_store('config_checks') + if checks_raw: + try: + check_config.update(json.loads(checks_raw)) + except json.JSONDecodeError: + self.log.exception( + "mgr/cephadm/config_checks is not valid JSON - all checks will run") + + # build lookup "maps" by walking the host facts, once + self._process_hosts() + + self.health_check_raised = False + self.active_checks = [] + self.skipped_checks = [] + + # process all healthchecks that are not explicitly disabled + for health_check in self.health_checks: + if check_config.get(health_check.name, '') != 'disabled': + self.active_checks.append(health_check.name) + health_check.func() + + self.mgr.set_health_checks(self.mgr.health_checks) diff --git a/src/pybind/mgr/cephadm/exchange.py b/src/pybind/mgr/cephadm/exchange.py new file mode 100644 index 000000000..76a613407 --- /dev/null +++ b/src/pybind/mgr/cephadm/exchange.py @@ -0,0 +1,164 @@ +# Data exchange formats for communicating more +# complex data structures between the cephadm binary +# and the mgr module.
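+#
+# A minimal usage sketch (editor illustration; the fsid, image, and daemon
+# name values are placeholders, the classes are the ones defined below):
+#
+#   meta = DeployMeta(service_name='osd', ports=[])
+#   d = Deploy(fsid='<fsid>', name='osd.3',
+#              image='quay.io/ceph/ceph:v18', meta=meta)
+#   payload = d.dump_json_str()  # JSON handed from the mgr module to cephadm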
+ +import json + +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + TypeVar, + Union, + cast, +) + + +FuncT = TypeVar("FuncT", bound=Callable) + + +class _DataField: + """A descriptor to map object fields into a data dictionary.""" + + def __init__( + self, + name: Optional[str] = None, + field_type: Optional[FuncT] = None, + ): + self.name = name + self.field_type = field_type + + def __set_name__(self, _: str, name: str) -> None: + if not self.name: + self.name = name + + def __get__(self, obj: Any, objtype: Any = None) -> Any: + return obj.data[self.name] + + def __set__(self, obj: Any, value: Any) -> None: + if self.field_type is not None: + obj.data[self.name] = self.field_type(value) + else: + obj.data[self.name] = value + + +def _get_data(obj: Any) -> Any: + """Wrapper to get underlying data dicts from objects that + advertise having them. + """ + _gd = getattr(obj, "get_data", None) + if _gd: + return _gd() + return obj + + +def _or_none(field_type: FuncT) -> FuncT: + def _field_type_or_none(value: Any) -> Any: + if value is None: + return None + return field_type(value) + + return cast(FuncT, _field_type_or_none) + + +class DeployMeta: + """Deployment metadata. Child of Deploy. Used by cephadm to + determine when certain changes have been made. + """ + + service_name = _DataField(field_type=str) + ports = _DataField(field_type=list) + ip = _DataField(field_type=_or_none(str)) + deployed_by = _DataField(field_type=_or_none(list)) + rank = _DataField(field_type=_or_none(int)) + rank_generation = _DataField(field_type=_or_none(int)) + extra_container_args = _DataField(field_type=_or_none(list)) + extra_entrypoint_args = _DataField(field_type=_or_none(list)) + + def __init__( + self, + init_data: Optional[Dict[str, Any]] = None, + *, + service_name: str = "", + ports: Optional[List[int]] = None, + ip: Optional[str] = None, + deployed_by: Optional[List[str]] = None, + rank: Optional[int] = None, + rank_generation: Optional[int] = None, + extra_container_args: Optional[List[Union[str, Dict[str, Any]]]] = None, + extra_entrypoint_args: Optional[List[Union[str, Dict[str, Any]]]] = None, + ): + self.data = dict(init_data or {}) + # set fields + self.service_name = service_name + self.ports = ports or [] + self.ip = ip + self.deployed_by = deployed_by + self.rank = rank + self.rank_generation = rank_generation + self.extra_container_args = extra_container_args + self.extra_entrypoint_args = extra_entrypoint_args + + def get_data(self) -> Dict[str, Any]: + return self.data + + to_simplified = get_data + + @classmethod + def convert( + cls, + value: Union[Dict[str, Any], "DeployMeta", None], + ) -> "DeployMeta": + if not isinstance(value, DeployMeta): + return cls(value) + return value + + +class Deploy: + """Set of fields that instructs cephadm to deploy a + service/daemon. 
+ """ + + fsid = _DataField(field_type=str) + name = _DataField(field_type=str) + image = _DataField(field_type=str) + deploy_arguments = _DataField(field_type=list) + params = _DataField(field_type=dict) + meta = _DataField(field_type=DeployMeta.convert) + config_blobs = _DataField(field_type=dict) + + def __init__( + self, + init_data: Optional[Dict[str, Any]] = None, + *, + fsid: str = "", + name: str = "", + image: str = "", + deploy_arguments: Optional[List[str]] = None, + params: Optional[Dict[str, Any]] = None, + meta: Optional[DeployMeta] = None, + config_blobs: Optional[Dict[str, Any]] = None, + ): + self.data = dict(init_data or {}) + # set fields + self.fsid = fsid + self.name = name + self.image = image + self.deploy_arguments = deploy_arguments or [] + self.params = params or {} + self.meta = DeployMeta.convert(meta) + self.config_blobs = config_blobs or {} + + def get_data(self) -> Dict[str, Any]: + """Return the underlying data dict.""" + return self.data + + def to_simplified(self) -> Dict[str, Any]: + """Return a simplified serializable version of the object.""" + return {k: _get_data(v) for k, v in self.get_data().items()} + + def dump_json_str(self) -> str: + """Return the object's JSON string representation.""" + return json.dumps(self.to_simplified()) diff --git a/src/pybind/mgr/cephadm/http_server.py b/src/pybind/mgr/cephadm/http_server.py new file mode 100644 index 000000000..ef29d3b4e --- /dev/null +++ b/src/pybind/mgr/cephadm/http_server.py @@ -0,0 +1,101 @@ +import cherrypy +import threading +import logging +from typing import TYPE_CHECKING + +from cephadm.agent import AgentEndpoint +from cephadm.service_discovery import ServiceDiscovery +from mgr_util import test_port_allocation, PortAlreadyInUse +from orchestrator import OrchestratorError + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + + +def cherrypy_filter(record: logging.LogRecord) -> int: + blocked = [ + 'TLSV1_ALERT_DECRYPT_ERROR' + ] + msg = record.getMessage() + return not any([m for m in blocked if m in msg]) + + +logging.getLogger('cherrypy.error').addFilter(cherrypy_filter) +cherrypy.log.access_log.propagate = False + + +class CephadmHttpServer(threading.Thread): + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr = mgr + self.agent = AgentEndpoint(mgr) + self.service_discovery = ServiceDiscovery(mgr) + self.cherrypy_shutdown_event = threading.Event() + self._service_discovery_port = self.mgr.service_discovery_port + self.secure_monitoring_stack = self.mgr.secure_monitoring_stack + super().__init__(target=self.run) + + def configure_cherrypy(self) -> None: + cherrypy.config.update({ + 'environment': 'production', + 'engine.autoreload.on': False, + }) + + def configure(self) -> None: + self.configure_cherrypy() + self.agent.configure() + self.service_discovery.configure(self.mgr.service_discovery_port, + self.mgr.get_mgr_ip(), + self.secure_monitoring_stack) + + def config_update(self) -> None: + self.service_discovery_port = self.mgr.service_discovery_port + if self.secure_monitoring_stack != self.mgr.secure_monitoring_stack: + self.secure_monitoring_stack = self.mgr.secure_monitoring_stack + self.restart() + + @property + def service_discovery_port(self) -> int: + return self._service_discovery_port + + @service_discovery_port.setter + def service_discovery_port(self, value: int) -> None: + if self._service_discovery_port == value: + return + + try: + test_port_allocation(self.mgr.get_mgr_ip(), value) + except PortAlreadyInUse: + raise 
OrchestratorError(f'Service discovery port {value} is already in use. Listening on old port {self._service_discovery_port}.') + except Exception as e: + raise OrchestratorError(f'Cannot check service discovery port ip:{self.mgr.get_mgr_ip()} port:{value} error:{e}') + + self.mgr.log.info(f'Changing service discovery port from {self._service_discovery_port} to {value}...') + self._service_discovery_port = value + self.restart() + + def restart(self) -> None: + cherrypy.engine.stop() + cherrypy.server.httpserver = None + self.configure() + cherrypy.engine.start() + + def run(self) -> None: + try: + self.mgr.log.debug('Starting cherrypy engine...') + self.configure() + cherrypy.server.unsubscribe() # disable default server + cherrypy.engine.start() + self.mgr.log.debug('Cherrypy engine started.') + self.mgr._kick_serve_loop() + # wait for the shutdown event + self.cherrypy_shutdown_event.wait() + self.cherrypy_shutdown_event.clear() + cherrypy.engine.stop() + cherrypy.server.httpserver = None + self.mgr.log.debug('Cherrypy engine stopped.') + except Exception as e: + self.mgr.log.error(f'Failed to run cephadm http server: {e}') + + def shutdown(self) -> None: + self.mgr.log.debug('Stopping cherrypy engine...') + self.cherrypy_shutdown_event.set() diff --git a/src/pybind/mgr/cephadm/inventory.py b/src/pybind/mgr/cephadm/inventory.py new file mode 100644 index 000000000..7153ca6dc --- /dev/null +++ b/src/pybind/mgr/cephadm/inventory.py @@ -0,0 +1,1565 @@ +import datetime +import enum +from copy import copy +import ipaddress +import itertools +import json +import logging +import math +import socket +from typing import TYPE_CHECKING, Dict, List, Iterator, Optional, Any, Tuple, Set, Mapping, cast, \ + NamedTuple, Type + +import orchestrator +from ceph.deployment import inventory +from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, TunedProfileSpec, IngressSpec +from ceph.utils import str_to_datetime, datetime_to_str, datetime_now +from orchestrator import OrchestratorError, HostSpec, OrchestratorEvent, service_to_daemon_types +from cephadm.services.cephadmservice import CephadmDaemonDeploySpec + +from .utils import resolve_ip, SpecialHostLabels +from .migrations import queue_migrate_nfs_spec, queue_migrate_rgw_spec + +if TYPE_CHECKING: + from .module import CephadmOrchestrator + + +logger = logging.getLogger(__name__) + +HOST_CACHE_PREFIX = "host." +SPEC_STORE_PREFIX = "spec." +AGENT_CACHE_PREFIX = 'agent.' + + +class HostCacheStatus(enum.Enum): + stray = 'stray' + host = 'host' + devices = 'devices' + + +class Inventory: + """ + The inventory stores a HostSpec for all hosts persistently. + """ + + def __init__(self, mgr: 'CephadmOrchestrator'): + self.mgr = mgr + adjusted_addrs = False + + def is_valid_ip(ip: str) -> bool: + try: + ipaddress.ip_address(ip) + return True + except ValueError: + return False + + # load inventory + i = self.mgr.get_store('inventory') + if i: + self._inventory: Dict[str, dict] = json.loads(i) + # handle old clusters missing 'hostname' key from hostspec + for k, v in self._inventory.items(): + if 'hostname' not in v: + v['hostname'] = k + + # convert legacy non-IP addr? + if is_valid_ip(str(v.get('addr'))): + continue + if len(self._inventory) > 1: + if k == socket.gethostname(): + # Never try to resolve our own host! This is + # fraught and can lead to either a loopback + # address (due to podman's futzing with + # /etc/hosts) or a private IP based on the CNI + # configuration. 
Instead, wait until the mgr + # fails over to another host and let them resolve + # this host. + continue + ip = resolve_ip(cast(str, v.get('addr'))) + else: + # we only have 1 node in the cluster, so we can't + # rely on another host doing the lookup. use the + # IP the mgr binds to. + ip = self.mgr.get_mgr_ip() + if is_valid_ip(ip) and not ip.startswith('127.0.'): + self.mgr.log.info( + f"inventory: adjusted host {v['hostname']} addr '{v['addr']}' -> '{ip}'" + ) + v['addr'] = ip + adjusted_addrs = True + if adjusted_addrs: + self.save() + else: + self._inventory = dict() + self._all_known_names: Dict[str, List[str]] = {} + logger.debug('Loaded inventory %s' % self._inventory) + + def keys(self) -> List[str]: + return list(self._inventory.keys()) + + def __contains__(self, host: str) -> bool: + return host in self._inventory or host in itertools.chain.from_iterable(self._all_known_names.values()) + + def _get_stored_name(self, host: str) -> str: + self.assert_host(host) + if host in self._inventory: + return host + for stored_name, all_names in self._all_known_names.items(): + if host in all_names: + return stored_name + return host + + def update_known_hostnames(self, hostname: str, shortname: str, fqdn: str) -> None: + for hname in [hostname, shortname, fqdn]: + # if we know the host by any of the names, store the full set of names + # in order to be able to check against those names for matching a host + if hname in self._inventory: + self._all_known_names[hname] = [hostname, shortname, fqdn] + return + logger.debug(f'got hostname set from gather-facts for unknown host: {[hostname, shortname, fqdn]}') + + def assert_host(self, host: str) -> None: + if host not in self: + raise OrchestratorError('host %s does not exist' % host) + + def add_host(self, spec: HostSpec) -> None: + if spec.hostname in self: + # addr + if self.get_addr(spec.hostname) != spec.addr: + self.set_addr(spec.hostname, spec.addr) + # labels + for label in spec.labels: + self.add_label(spec.hostname, label) + else: + self._inventory[spec.hostname] = spec.to_json() + self.save() + + def rm_host(self, host: str) -> None: + host = self._get_stored_name(host) + del self._inventory[host] + self._all_known_names.pop(host, []) + self.save() + + def set_addr(self, host: str, addr: str) -> None: + host = self._get_stored_name(host) + self._inventory[host]['addr'] = addr + self.save() + + def add_label(self, host: str, label: str) -> None: + host = self._get_stored_name(host) + + if 'labels' not in self._inventory[host]: + self._inventory[host]['labels'] = list() + if label not in self._inventory[host]['labels']: + self._inventory[host]['labels'].append(label) + self.save() + + def rm_label(self, host: str, label: str) -> None: + host = self._get_stored_name(host) + + if 'labels' not in self._inventory[host]: + self._inventory[host]['labels'] = list() + if label in self._inventory[host]['labels']: + self._inventory[host]['labels'].remove(label) + self.save() + + def has_label(self, host: str, label: str) -> bool: + host = self._get_stored_name(host) + return ( + host in self._inventory + and label in self._inventory[host].get('labels', []) + ) + + def get_addr(self, host: str) -> str: + host = self._get_stored_name(host) + return self._inventory[host].get('addr', host) + + def spec_from_dict(self, info: dict) -> HostSpec: + hostname = info['hostname'] + hostname = self._get_stored_name(hostname) + return HostSpec( + hostname, + addr=info.get('addr', hostname), + labels=info.get('labels', []), + status='Offline' if hostname in 
self.mgr.offline_hosts else info.get('status', ''), + ) + + def all_specs(self) -> List[HostSpec]: + return list(map(self.spec_from_dict, self._inventory.values())) + + def get_host_with_state(self, state: str = "") -> List[str]: + """return a list of host names in a specific state""" + return [h for h in self._inventory if self._inventory[h].get("status", "").lower() == state] + + def save(self) -> None: + self.mgr.set_store('inventory', json.dumps(self._inventory)) + + +class SpecDescription(NamedTuple): + spec: ServiceSpec + rank_map: Optional[Dict[int, Dict[int, Optional[str]]]] + created: datetime.datetime + deleted: Optional[datetime.datetime] + + +class SpecStore(): + def __init__(self, mgr): + # type: (CephadmOrchestrator) -> None + self.mgr = mgr + self._specs = {} # type: Dict[str, ServiceSpec] + # service_name -> rank -> gen -> daemon_id + self._rank_maps = {} # type: Dict[str, Dict[int, Dict[int, Optional[str]]]] + self.spec_created = {} # type: Dict[str, datetime.datetime] + self.spec_deleted = {} # type: Dict[str, datetime.datetime] + self.spec_preview = {} # type: Dict[str, ServiceSpec] + self._needs_configuration: Dict[str, bool] = {} + + @property + def all_specs(self) -> Mapping[str, ServiceSpec]: + """ + returns active and deleted specs. Returns read-only dict. + """ + return self._specs + + def __contains__(self, name: str) -> bool: + return name in self._specs + + def __getitem__(self, name: str) -> SpecDescription: + if name not in self._specs: + raise OrchestratorError(f'Service {name} not found.') + return SpecDescription(self._specs[name], + self._rank_maps.get(name), + self.spec_created[name], + self.spec_deleted.get(name, None)) + + @property + def active_specs(self) -> Mapping[str, ServiceSpec]: + return {k: v for k, v in self._specs.items() if k not in self.spec_deleted} + + def load(self): + # type: () -> None + for k, v in self.mgr.get_store_prefix(SPEC_STORE_PREFIX).items(): + service_name = k[len(SPEC_STORE_PREFIX):] + try: + j = cast(Dict[str, dict], json.loads(v)) + if ( + (self.mgr.migration_current or 0) < 3 + and j['spec'].get('service_type') == 'nfs' + ): + self.mgr.log.debug(f'found legacy nfs spec {j}') + queue_migrate_nfs_spec(self.mgr, j) + + if ( + (self.mgr.migration_current or 0) < 6 + and j['spec'].get('service_type') == 'rgw' + ): + queue_migrate_rgw_spec(self.mgr, j) + + spec = ServiceSpec.from_json(j['spec']) + created = str_to_datetime(cast(str, j['created'])) + self._specs[service_name] = spec + self.spec_created[service_name] = created + + if 'deleted' in j: + deleted = str_to_datetime(cast(str, j['deleted'])) + self.spec_deleted[service_name] = deleted + + if 'needs_configuration' in j: + self._needs_configuration[service_name] = cast(bool, j['needs_configuration']) + + if 'rank_map' in j and isinstance(j['rank_map'], dict): + self._rank_maps[service_name] = {} + for rank_str, m in j['rank_map'].items(): + try: + rank = int(rank_str) + except ValueError: + logger.exception(f"failed to parse rank in {j['rank_map']}") + continue + if isinstance(m, dict): + self._rank_maps[service_name][rank] = {} + for gen_str, name in m.items(): + try: + gen = int(gen_str) + except ValueError: + logger.exception(f"failed to parse gen in {j['rank_map']}") + continue + if isinstance(name, str) or m is None: + self._rank_maps[service_name][rank][gen] = name + + self.mgr.log.debug('SpecStore: loaded spec for %s' % ( + service_name)) + except Exception as e: + self.mgr.log.warning('unable to load spec for %s: %s' % ( + service_name, e)) + pass + + def save( + 
self, + spec: ServiceSpec, + update_create: bool = True, + ) -> None: + name = spec.service_name() + if spec.preview_only: + self.spec_preview[name] = spec + return None + self._specs[name] = spec + self._needs_configuration[name] = True + + if update_create: + self.spec_created[name] = datetime_now() + self._save(name) + + def save_rank_map(self, + name: str, + rank_map: Dict[int, Dict[int, Optional[str]]]) -> None: + self._rank_maps[name] = rank_map + self._save(name) + + def _save(self, name: str) -> None: + data: Dict[str, Any] = { + 'spec': self._specs[name].to_json(), + } + if name in self.spec_created: + data['created'] = datetime_to_str(self.spec_created[name]) + if name in self._rank_maps: + data['rank_map'] = self._rank_maps[name] + if name in self.spec_deleted: + data['deleted'] = datetime_to_str(self.spec_deleted[name]) + if name in self._needs_configuration: + data['needs_configuration'] = self._needs_configuration[name] + + self.mgr.set_store( + SPEC_STORE_PREFIX + name, + json.dumps(data, sort_keys=True), + ) + self.mgr.events.for_service(self._specs[name], + OrchestratorEvent.INFO, + 'service was created') + + def rm(self, service_name: str) -> bool: + if service_name not in self._specs: + return False + + if self._specs[service_name].preview_only: + self.finally_rm(service_name) + return True + + self.spec_deleted[service_name] = datetime_now() + self.save(self._specs[service_name], update_create=False) + return True + + def finally_rm(self, service_name): + # type: (str) -> bool + found = service_name in self._specs + if found: + del self._specs[service_name] + if service_name in self._rank_maps: + del self._rank_maps[service_name] + del self.spec_created[service_name] + if service_name in self.spec_deleted: + del self.spec_deleted[service_name] + if service_name in self._needs_configuration: + del self._needs_configuration[service_name] + self.mgr.set_store(SPEC_STORE_PREFIX + service_name, None) + return found + + def get_created(self, spec: ServiceSpec) -> Optional[datetime.datetime]: + return self.spec_created.get(spec.service_name()) + + def set_unmanaged(self, service_name: str, value: bool) -> str: + if service_name not in self._specs: + return f'No service of name {service_name} found. Check "ceph orch ls" for all known services' + if self._specs[service_name].unmanaged == value: + return f'Service {service_name}{" already " if value else " not "}marked unmanaged. No action taken.' 
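+        # Context note: "unmanaged" keeps the spec in the store but stops
+        # cephadm from creating/removing daemons for it. This method is
+        # presumably what backs a CLI call along the lines of
+        #   ceph orch set-unmanaged <service_name>
+        # (command name assumed here for illustration; it is not defined in
+        # this file).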
+ self._specs[service_name].unmanaged = value + self.save(self._specs[service_name]) + return f'Set unmanaged to {str(value)} for service {service_name}' + + def needs_configuration(self, name: str) -> bool: + return self._needs_configuration.get(name, False) + + def mark_needs_configuration(self, name: str) -> None: + if name in self._specs: + self._needs_configuration[name] = True + self._save(name) + else: + self.mgr.log.warning(f'Attempted to mark unknown service "{name}" as needing configuration') + + def mark_configured(self, name: str) -> None: + if name in self._specs: + self._needs_configuration[name] = False + self._save(name) + else: + self.mgr.log.warning(f'Attempted to mark unknown service "{name}" as having been configured') + + +class ClientKeyringSpec(object): + """ + A client keyring file that we should maintain + """ + + def __init__( + self, + entity: str, + placement: PlacementSpec, + mode: Optional[int] = None, + uid: Optional[int] = None, + gid: Optional[int] = None, + ) -> None: + self.entity = entity + self.placement = placement + self.mode = mode or 0o600 + self.uid = uid or 0 + self.gid = gid or 0 + + def validate(self) -> None: + pass + + def to_json(self) -> Dict[str, Any]: + return { + 'entity': self.entity, + 'placement': self.placement.to_json(), + 'mode': self.mode, + 'uid': self.uid, + 'gid': self.gid, + } + + @property + def path(self) -> str: + return f'/etc/ceph/ceph.{self.entity}.keyring' + + @classmethod + def from_json(cls: Type, data: dict) -> 'ClientKeyringSpec': + c = data.copy() + if 'placement' in c: + c['placement'] = PlacementSpec.from_json(c['placement']) + _cls = cls(**c) + _cls.validate() + return _cls + + +class ClientKeyringStore(): + """ + Track client keyring files that we are supposed to maintain + """ + + def __init__(self, mgr): + # type: (CephadmOrchestrator) -> None + self.mgr: CephadmOrchestrator = mgr + self.mgr = mgr + self.keys: Dict[str, ClientKeyringSpec] = {} + + def load(self) -> None: + c = self.mgr.get_store('client_keyrings') or b'{}' + j = json.loads(c) + for e, d in j.items(): + self.keys[e] = ClientKeyringSpec.from_json(d) + + def save(self) -> None: + data = { + k: v.to_json() for k, v in self.keys.items() + } + self.mgr.set_store('client_keyrings', json.dumps(data)) + + def update(self, ks: ClientKeyringSpec) -> None: + self.keys[ks.entity] = ks + self.save() + + def rm(self, entity: str) -> None: + if entity in self.keys: + del self.keys[entity] + self.save() + + +class TunedProfileStore(): + """ + Store for out tuned profile information + """ + + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr: CephadmOrchestrator = mgr + self.mgr = mgr + self.profiles: Dict[str, TunedProfileSpec] = {} + + def __contains__(self, profile: str) -> bool: + return profile in self.profiles + + def load(self) -> None: + c = self.mgr.get_store('tuned_profiles') or b'{}' + j = json.loads(c) + for k, v in j.items(): + self.profiles[k] = TunedProfileSpec.from_json(v) + self.profiles[k]._last_updated = datetime_to_str(datetime_now()) + + def exists(self, profile_name: str) -> bool: + return profile_name in self.profiles + + def save(self) -> None: + profiles_json = {k: v.to_json() for k, v in self.profiles.items()} + self.mgr.set_store('tuned_profiles', json.dumps(profiles_json)) + + def add_setting(self, profile: str, setting: str, value: str) -> None: + if profile in self.profiles: + self.profiles[profile].settings[setting] = value + self.profiles[profile]._last_updated = datetime_to_str(datetime_now()) + self.save() + 
else: + logger.error( + f'Attempted to set setting "{setting}" for nonexistent os tuning profile "{profile}"') + + def rm_setting(self, profile: str, setting: str) -> None: + if profile in self.profiles: + if setting in self.profiles[profile].settings: + self.profiles[profile].settings.pop(setting, '') + self.profiles[profile]._last_updated = datetime_to_str(datetime_now()) + self.save() + else: + logger.error( + f'Attempted to remove nonexistent setting "{setting}" from os tuning profile "{profile}"') + else: + logger.error( + f'Attempted to remove setting "{setting}" from nonexistent os tuning profile "{profile}"') + + def add_profile(self, spec: TunedProfileSpec) -> None: + spec._last_updated = datetime_to_str(datetime_now()) + self.profiles[spec.profile_name] = spec + self.save() + + def rm_profile(self, profile: str) -> None: + if profile in self.profiles: + self.profiles.pop(profile, TunedProfileSpec('')) + else: + logger.error(f'Attempted to remove nonexistent os tuning profile "{profile}"') + self.save() + + def last_updated(self, profile: str) -> Optional[datetime.datetime]: + if profile not in self.profiles or not self.profiles[profile]._last_updated: + return None + return str_to_datetime(self.profiles[profile]._last_updated) + + def set_last_updated(self, profile: str, new_datetime: datetime.datetime) -> None: + if profile in self.profiles: + self.profiles[profile]._last_updated = datetime_to_str(new_datetime) + + def list_profiles(self) -> List[TunedProfileSpec]: + return [p for p in self.profiles.values()] + + +class HostCache(): + """ + HostCache stores different things: + + 1. `daemons`: Deployed daemons O(daemons) + + They're part of the configuration nowadays and need to be + persistent. The name "daemon cache" is unfortunately a bit misleading. + For example, we really need to know where daemons are deployed even on + hosts that are offline. + + 2. `devices`: ceph-volume inventory cache O(hosts) + + As soon as this is populated, it becomes more or less read-only. + + 3. `networks`: network interfaces for each host. O(hosts) + + This is needed in order to deploy MONs; it is mostly read-only. + + 4. `last_client_files` O(hosts) + + Stores the last digest and owner/mode for files we've pushed to /etc/ceph + (ceph.conf or client keyrings). + + 5. `scheduled_daemon_actions`: O(daemons) + + Used to run daemon actions after deploying a daemon. We need to + store it persistently, in order to stay consistent across + MGR failovers.
+ """ + + def __init__(self, mgr): + # type: (CephadmOrchestrator) -> None + self.mgr: CephadmOrchestrator = mgr + self.daemons = {} # type: Dict[str, Dict[str, orchestrator.DaemonDescription]] + self._tmp_daemons = {} # type: Dict[str, Dict[str, orchestrator.DaemonDescription]] + self.last_daemon_update = {} # type: Dict[str, datetime.datetime] + self.devices = {} # type: Dict[str, List[inventory.Device]] + self.facts = {} # type: Dict[str, Dict[str, Any]] + self.last_facts_update = {} # type: Dict[str, datetime.datetime] + self.last_autotune = {} # type: Dict[str, datetime.datetime] + self.osdspec_previews = {} # type: Dict[str, List[Dict[str, Any]]] + self.osdspec_last_applied = {} # type: Dict[str, Dict[str, datetime.datetime]] + self.networks = {} # type: Dict[str, Dict[str, Dict[str, List[str]]]] + self.last_network_update = {} # type: Dict[str, datetime.datetime] + self.last_device_update = {} # type: Dict[str, datetime.datetime] + self.last_device_change = {} # type: Dict[str, datetime.datetime] + self.last_tuned_profile_update = {} # type: Dict[str, datetime.datetime] + self.daemon_refresh_queue = [] # type: List[str] + self.device_refresh_queue = [] # type: List[str] + self.network_refresh_queue = [] # type: List[str] + self.osdspec_previews_refresh_queue = [] # type: List[str] + + # host -> daemon name -> dict + self.daemon_config_deps = {} # type: Dict[str, Dict[str, Dict[str,Any]]] + self.last_host_check = {} # type: Dict[str, datetime.datetime] + self.loading_osdspec_preview = set() # type: Set[str] + self.last_client_files: Dict[str, Dict[str, Tuple[str, int, int, int]]] = {} + self.registry_login_queue: Set[str] = set() + + self.scheduled_daemon_actions: Dict[str, Dict[str, str]] = {} + + self.metadata_up_to_date = {} # type: Dict[str, bool] + + def load(self): + # type: () -> None + for k, v in self.mgr.get_store_prefix(HOST_CACHE_PREFIX).items(): + host = k[len(HOST_CACHE_PREFIX):] + if self._get_host_cache_entry_status(host) != HostCacheStatus.host: + if self._get_host_cache_entry_status(host) == HostCacheStatus.devices: + continue + self.mgr.log.warning('removing stray HostCache host record %s' % ( + host)) + self.mgr.set_store(k, None) + try: + j = json.loads(v) + if 'last_device_update' in j: + self.last_device_update[host] = str_to_datetime(j['last_device_update']) + else: + self.device_refresh_queue.append(host) + if 'last_device_change' in j: + self.last_device_change[host] = str_to_datetime(j['last_device_change']) + # for services, we ignore the persisted last_*_update + # and always trigger a new scrape on mgr restart. 
+ self.daemon_refresh_queue.append(host) + self.network_refresh_queue.append(host) + self.daemons[host] = {} + self.osdspec_previews[host] = [] + self.osdspec_last_applied[host] = {} + self.networks[host] = {} + self.daemon_config_deps[host] = {} + for name, d in j.get('daemons', {}).items(): + self.daemons[host][name] = \ + orchestrator.DaemonDescription.from_json(d) + self.devices[host] = [] + # still want to check old device location for upgrade scenarios + for d in j.get('devices', []): + self.devices[host].append(inventory.Device.from_json(d)) + self.devices[host] += self.load_host_devices(host) + self.networks[host] = j.get('networks_and_interfaces', {}) + self.osdspec_previews[host] = j.get('osdspec_previews', {}) + self.last_client_files[host] = j.get('last_client_files', {}) + for name, ts in j.get('osdspec_last_applied', {}).items(): + self.osdspec_last_applied[host][name] = str_to_datetime(ts) + + for name, d in j.get('daemon_config_deps', {}).items(): + self.daemon_config_deps[host][name] = { + 'deps': d.get('deps', []), + 'last_config': str_to_datetime(d['last_config']), + } + if 'last_host_check' in j: + self.last_host_check[host] = str_to_datetime(j['last_host_check']) + if 'last_tuned_profile_update' in j: + self.last_tuned_profile_update[host] = str_to_datetime( + j['last_tuned_profile_update']) + self.registry_login_queue.add(host) + self.scheduled_daemon_actions[host] = j.get('scheduled_daemon_actions', {}) + self.metadata_up_to_date[host] = j.get('metadata_up_to_date', False) + + self.mgr.log.debug( + 'HostCache.load: host %s has %d daemons, ' + '%d devices, %d networks' % ( + host, len(self.daemons[host]), len(self.devices[host]), + len(self.networks[host]))) + except Exception as e: + self.mgr.log.warning('unable to load cached state for %s: %s' % ( + host, e)) + pass + + def _get_host_cache_entry_status(self, host: str) -> HostCacheStatus: + # return whether a host cache entry in the config-key + # store is for a host, a set of devices or is stray. + # for a host, the entry name will match a hostname in our + # inventory. For devices, it will be formatted + # <hostname>.devices.<n> where <hostname> is + # in our inventory. If neither case applies, it is stray + if host in self.mgr.inventory: + return HostCacheStatus.host + try: + # try stripping off the ".devices.<n>" and see if we get + # a host name that matches our inventory + actual_host = '.'.join(host.split('.')[:-2]) + return HostCacheStatus.devices if actual_host in self.mgr.inventory else HostCacheStatus.stray + except Exception: + return HostCacheStatus.stray + + def update_host_daemons(self, host, dm): + # type: (str, Dict[str, orchestrator.DaemonDescription]) -> None + self.daemons[host] = dm + self._tmp_daemons.pop(host, {}) + self.last_daemon_update[host] = datetime_now() + + def append_tmp_daemon(self, host: str, dd: orchestrator.DaemonDescription) -> None: + # for storing empty daemon descriptions representing daemons we have + # just deployed but not yet had the chance to pick up in a daemon refresh + # _tmp_daemons is cleared for a host upon receiving a real update of the + # host's daemons + if host not in self._tmp_daemons: + self._tmp_daemons[host] = {} + self._tmp_daemons[host][dd.name()] = dd + + def update_host_facts(self, host, facts): + # type: (str, Dict[str, Dict[str, Any]]) -> None + self.facts[host] = facts + hostnames: List[str] = [] + for k in ['hostname', 'shortname', 'fqdn']: + v = facts.get(k, '') + hostnames.append(v if isinstance(v, str) else '') + self.mgr.inventory.update_known_hostnames(hostnames[0], hostnames[1], hostnames[2]) + self.last_facts_update[host] = datetime_now() + + def update_autotune(self, host: str) -> None: + self.last_autotune[host] = datetime_now() + + def invalidate_autotune(self, host: str) -> None: + if host in self.last_autotune: + del self.last_autotune[host] + + def devices_changed(self, host: str, b: List[inventory.Device]) -> bool: + old_devs = inventory.Devices(self.devices[host]) + new_devs = inventory.Devices(b) + # relying on Devices class __eq__ function here + if old_devs != new_devs: + self.mgr.log.info("Detected new or changed devices on %s" % host) + return True + return False + + def update_host_devices( + self, + host: str, + dls: List[inventory.Device], + ) -> None: + if ( + host not in self.devices + or host not in self.last_device_change + or self.devices_changed(host, dls) + ): + self.last_device_change[host] = datetime_now() + self.last_device_update[host] = datetime_now() + self.devices[host] = dls + + def update_host_networks( + self, + host: str, + nets: Dict[str, Dict[str, List[str]]] + ) -> None: + self.networks[host] = nets + self.last_network_update[host] = datetime_now() + + def update_daemon_config_deps(self, host: str, name: str, deps: List[str], stamp: datetime.datetime) -> None: + self.daemon_config_deps[host][name] = { + 'deps': deps, + 'last_config': stamp, + } + + def update_last_host_check(self, host): + # type: (str) -> None + self.last_host_check[host] = datetime_now() + + def update_osdspec_last_applied(self, host, service_name, ts): + # type: (str, str, datetime.datetime) -> None + self.osdspec_last_applied[host][service_name] = ts + + def update_client_file(self, + host: str, + path: str, + digest: str, + mode: int, + uid: int, + gid: int) -> None: + if host not in self.last_client_files: + self.last_client_files[host] = {} + self.last_client_files[host][path] = (digest, mode, uid, gid) + + def removed_client_file(self, host: str, path: str) -> None: + if ( + host in self.last_client_files + and path in self.last_client_files[host] + ): + del self.last_client_files[host][path] + + def prime_empty_host(self, host): + # type: (str) -> None + """ + Install an empty entry for a host + """ + self.daemons[host] = {} + self.devices[host] = [] + self.networks[host] = {} + self.osdspec_previews[host] = [] +
self.osdspec_last_applied[host] = {} + self.daemon_config_deps[host] = {} + self.daemon_refresh_queue.append(host) + self.device_refresh_queue.append(host) + self.network_refresh_queue.append(host) + self.osdspec_previews_refresh_queue.append(host) + self.registry_login_queue.add(host) + self.last_client_files[host] = {} + + def refresh_all_host_info(self, host): + # type: (str) -> None + + self.last_host_check.pop(host, None) + self.daemon_refresh_queue.append(host) + self.registry_login_queue.add(host) + self.device_refresh_queue.append(host) + self.last_facts_update.pop(host, None) + self.osdspec_previews_refresh_queue.append(host) + self.last_autotune.pop(host, None) + + def invalidate_host_daemons(self, host): + # type: (str) -> None + self.daemon_refresh_queue.append(host) + if host in self.last_daemon_update: + del self.last_daemon_update[host] + self.mgr.event.set() + + def invalidate_host_devices(self, host): + # type: (str) -> None + self.device_refresh_queue.append(host) + if host in self.last_device_update: + del self.last_device_update[host] + self.mgr.event.set() + + def invalidate_host_networks(self, host): + # type: (str) -> None + self.network_refresh_queue.append(host) + if host in self.last_network_update: + del self.last_network_update[host] + self.mgr.event.set() + + def distribute_new_registry_login_info(self) -> None: + self.registry_login_queue = set(self.mgr.inventory.keys()) + + def save_host(self, host: str) -> None: + j: Dict[str, Any] = { + 'daemons': {}, + 'devices': [], + 'osdspec_previews': [], + 'osdspec_last_applied': {}, + 'daemon_config_deps': {}, + } + if host in self.last_daemon_update: + j['last_daemon_update'] = datetime_to_str(self.last_daemon_update[host]) + if host in self.last_device_update: + j['last_device_update'] = datetime_to_str(self.last_device_update[host]) + if host in self.last_network_update: + j['last_network_update'] = datetime_to_str(self.last_network_update[host]) + if host in self.last_device_change: + j['last_device_change'] = datetime_to_str(self.last_device_change[host]) + if host in self.last_tuned_profile_update: + j['last_tuned_profile_update'] = datetime_to_str(self.last_tuned_profile_update[host]) + if host in self.daemons: + for name, dd in self.daemons[host].items(): + j['daemons'][name] = dd.to_json() + if host in self.networks: + j['networks_and_interfaces'] = self.networks[host] + if host in self.daemon_config_deps: + for name, depi in self.daemon_config_deps[host].items(): + j['daemon_config_deps'][name] = { + 'deps': depi.get('deps', []), + 'last_config': datetime_to_str(depi['last_config']), + } + if host in self.osdspec_previews and self.osdspec_previews[host]: + j['osdspec_previews'] = self.osdspec_previews[host] + if host in self.osdspec_last_applied: + for name, ts in self.osdspec_last_applied[host].items(): + j['osdspec_last_applied'][name] = datetime_to_str(ts) + + if host in self.last_host_check: + j['last_host_check'] = datetime_to_str(self.last_host_check[host]) + + if host in self.last_client_files: + j['last_client_files'] = self.last_client_files[host] + if host in self.scheduled_daemon_actions: + j['scheduled_daemon_actions'] = self.scheduled_daemon_actions[host] + if host in self.metadata_up_to_date: + j['metadata_up_to_date'] = self.metadata_up_to_date[host] + if host in self.devices: + self.save_host_devices(host) + + self.mgr.set_store(HOST_CACHE_PREFIX + host, json.dumps(j)) + + def save_host_devices(self, host: str) -> None: + if host not in self.devices or not self.devices[host]: + 
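+            # Worked example for the chunking below (illustrative numbers):
+            # with mon_config_key_max_entry_size = 65536 and an ~80 KiB devs
+            # payload, cache_entries_needed = ceil(81920 / 65536) + 1 = 3, so
+            # the device list is split across host.<hostname>.devices.0/1/2
+            # and the first entry carries {'entries': 3}, which tells
+            # load_host_devices() how many keys to read back.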
logger.debug(f'Host {host} has no devices to save') + return + + devs: List[Dict[str, Any]] = [] + for d in self.devices[host]: + devs.append(d.to_json()) + + def byte_len(s: str) -> int: + return len(s.encode('utf-8')) + + dev_cache_counter: int = 0 + cache_size: int = self.mgr.get_foreign_ceph_option('mon', 'mon_config_key_max_entry_size') + if cache_size is not None and cache_size != 0 and byte_len(json.dumps(devs)) > cache_size - 1024: + # no guarantee all device entries take up the same amount of space + # splitting it up so there's one more entry than we need should be fairly + # safe and save a lot of extra logic checking sizes + cache_entries_needed = math.ceil(byte_len(json.dumps(devs)) / cache_size) + 1 + dev_sublist_size = math.ceil(len(devs) / cache_entries_needed) + dev_lists: List[List[Dict[str, Any]]] = [devs[i:i + dev_sublist_size] + for i in range(0, len(devs), dev_sublist_size)] + for dev_list in dev_lists: + dev_dict: Dict[str, Any] = {'devices': dev_list} + if dev_cache_counter == 0: + dev_dict.update({'entries': len(dev_lists)}) + self.mgr.set_store(HOST_CACHE_PREFIX + host + '.devices.' + + str(dev_cache_counter), json.dumps(dev_dict)) + dev_cache_counter += 1 + else: + self.mgr.set_store(HOST_CACHE_PREFIX + host + '.devices.' + + str(dev_cache_counter), json.dumps({'devices': devs, 'entries': 1})) + + def load_host_devices(self, host: str) -> List[inventory.Device]: + dev_cache_counter: int = 0 + devs: List[Dict[str, Any]] = [] + dev_entries: int = 0 + try: + # number of entries for the host's devices should be in + # the "entries" field of the first entry + dev_entries = json.loads(self.mgr.get_store( + HOST_CACHE_PREFIX + host + '.devices.0')).get('entries') + except Exception: + logger.debug(f'No device entries found for host {host}') + for i in range(dev_entries): + try: + new_devs = json.loads(self.mgr.get_store( + HOST_CACHE_PREFIX + host + '.devices.' + str(i))).get('devices', []) + if len(new_devs) > 0: + # verify list contains actual device objects by trying to load one from json + inventory.Device.from_json(new_devs[0]) + # if we didn't throw an Exception on above line, we can add the devices + devs = devs + new_devs + dev_cache_counter += 1 + except Exception as e: + logger.error(('Hit exception trying to load devices from ' + + f'{HOST_CACHE_PREFIX + host + ".devices." 
+ str(dev_cache_counter)} in key store: {e}')) + return [] + return [inventory.Device.from_json(d) for d in devs] + + def rm_host(self, host): + # type: (str) -> None + if host in self.daemons: + del self.daemons[host] + if host in self.devices: + del self.devices[host] + if host in self.facts: + del self.facts[host] + if host in self.last_facts_update: + del self.last_facts_update[host] + if host in self.last_autotune: + del self.last_autotune[host] + if host in self.osdspec_previews: + del self.osdspec_previews[host] + if host in self.osdspec_last_applied: + del self.osdspec_last_applied[host] + if host in self.loading_osdspec_preview: + self.loading_osdspec_preview.remove(host) + if host in self.networks: + del self.networks[host] + if host in self.last_daemon_update: + del self.last_daemon_update[host] + if host in self.last_device_update: + del self.last_device_update[host] + if host in self.last_network_update: + del self.last_network_update[host] + if host in self.last_device_change: + del self.last_device_change[host] + if host in self.last_tuned_profile_update: + del self.last_tuned_profile_update[host] + if host in self.daemon_config_deps: + del self.daemon_config_deps[host] + if host in self.scheduled_daemon_actions: + del self.scheduled_daemon_actions[host] + if host in self.last_client_files: + del self.last_client_files[host] + self.mgr.set_store(HOST_CACHE_PREFIX + host, None) + + def get_hosts(self): + # type: () -> List[str] + return list(self.daemons) + + def get_schedulable_hosts(self) -> List[HostSpec]: + """ + Returns all usable hosts that went through _refresh_host_daemons(). + + This mitigates a potential race where a new host was added *after* + ``_refresh_host_daemons()`` was called, but *before* + ``_apply_all_specs()`` was called; thus we could end up with hosts + where daemons might be running, but we have not yet detected them. + """ + return [ + h for h in self.mgr.inventory.all_specs() + if ( + self.host_had_daemon_refresh(h.hostname) + and SpecialHostLabels.DRAIN_DAEMONS not in h.labels + ) + ] + + def get_conf_keyring_available_hosts(self) -> List[HostSpec]: + """ + Returns all hosts without the drain conf and keyrings + label (SpecialHostLabels.DRAIN_CONF_KEYRING) that have + had a refresh. That is equivalent to all hosts we + consider eligible for deployment of conf and keyring files. + + Any host without that label is considered fair game for + a client keyring spec to match. However, we still want to + wait for the refresh here so that we know which keyrings we've + already deployed. + """ + return [ + h for h in self.mgr.inventory.all_specs() + if ( + self.host_had_daemon_refresh(h.hostname) + and SpecialHostLabels.DRAIN_CONF_KEYRING not in h.labels + ) + ] + + def get_non_draining_hosts(self) -> List[HostSpec]: + """ + Returns all hosts that do not have the drain daemons label + (SpecialHostLabels.DRAIN_DAEMONS).
+ + Useful for the agent who needs this specific list rather than the + schedulable_hosts since the agent needs to be deployed on hosts with + no daemon refresh + """ + return [ + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_DAEMONS not in h.labels + ] + + def get_draining_hosts(self) -> List[HostSpec]: + """ + Returns all hosts that have the drain daemons label (SpecialHostLabels.DRAIN_DAEMONS) + and therefore should have no daemons placed on them, but are potentially still reachable + """ + return [ + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_DAEMONS in h.labels + ] + + def get_conf_keyring_draining_hosts(self) -> List[HostSpec]: + """ + Returns all hosts that have drain conf and keyrings label (SpecialHostLabels.DRAIN_CONF_KEYRING) + and therefore should have no config files or client keyring placed on them, but are + potentially still reachable + """ + return [ + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_CONF_KEYRING in h.labels + ] + + def get_unreachable_hosts(self) -> List[HostSpec]: + """ + Return all hosts that are offline or in maintenance mode. + + The idea is we should not touch the daemons on these hosts (since + in theory the hosts are inaccessible so we CAN'T touch them) but + we still want to count daemons that exist on these hosts toward the + placement so daemons on these hosts aren't just moved elsewhere + """ + return [ + h for h in self.mgr.inventory.all_specs() + if ( + h.status.lower() in ['maintenance', 'offline'] + or h.hostname in self.mgr.offline_hosts + ) + ] + + def is_host_unreachable(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of an unreachable host + return hostname in [h.hostname for h in self.get_unreachable_hosts()] + + def is_host_schedulable(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of a schedulable host + return hostname in [h.hostname for h in self.get_schedulable_hosts()] + + def is_host_draining(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of a draining host + return hostname in [h.hostname for h in self.get_draining_hosts()] + + def get_facts(self, host: str) -> Dict[str, Any]: + return self.facts.get(host, {}) + + def _get_daemons(self) -> Iterator[orchestrator.DaemonDescription]: + for dm in self.daemons.copy().values(): + yield from dm.values() + + def _get_tmp_daemons(self) -> Iterator[orchestrator.DaemonDescription]: + for dm in self._tmp_daemons.copy().values(): + yield from dm.values() + + def get_daemons(self): + # type: () -> List[orchestrator.DaemonDescription] + return list(self._get_daemons()) + + def get_error_daemons(self) -> List[orchestrator.DaemonDescription]: + r = [] + for dd in self._get_daemons(): + if dd.status is not None and dd.status == orchestrator.DaemonDescriptionStatus.error: + r.append(dd) + return r + + def get_daemons_by_host(self, host: str) -> List[orchestrator.DaemonDescription]: + return list(self.daemons.get(host, {}).values()) + + def get_daemon(self, daemon_name: str, host: Optional[str] = None) -> orchestrator.DaemonDescription: + assert not daemon_name.startswith('ha-rgw.') + dds = self.get_daemons_by_host(host) if host else self._get_daemons() + for dd in dds: + if dd.name() == daemon_name: + return dd + + raise orchestrator.OrchestratorError(f'Unable to find {daemon_name} daemon(s)') + + def has_daemon(self, daemon_name: str, host: Optional[str] = None) -> bool: + try: + self.get_daemon(daemon_name, host) 
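# [editor's note] Sketch of the lookup-by-exception pattern that has_daemon()
# wraps around get_daemon(): the getter raises when nothing matches, and the
# boolean helper converts that into False. Toy stand-ins only, not the
# orchestrator classes used above.
class NotFound(Exception):
    pass

DAEMONS = {'mgr.a', 'mon.a'}

def get_daemon(name: str) -> str:
    if name in DAEMONS:
        return name
    raise NotFound(f'Unable to find {name} daemon(s)')

def has_daemon(name: str) -> bool:
    try:
        get_daemon(name)
    except NotFound:
        return False
    return True

assert has_daemon('mgr.a') and not has_daemon('osd.0')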
+ except orchestrator.OrchestratorError: + return False + return True + + def get_daemons_with_volatile_status(self) -> Iterator[Tuple[str, Dict[str, orchestrator.DaemonDescription]]]: + def alter(host: str, dd_orig: orchestrator.DaemonDescription) -> orchestrator.DaemonDescription: + dd = copy(dd_orig) + if host in self.mgr.offline_hosts: + dd.status = orchestrator.DaemonDescriptionStatus.error + dd.status_desc = 'host is offline' + elif self.mgr.inventory._inventory[host].get("status", "").lower() == "maintenance": + # We do not refresh daemons on hosts in maintenance mode, so stored daemon statuses + # could be wrong. We must assume maintenance is working and daemons are stopped + dd.status = orchestrator.DaemonDescriptionStatus.stopped + dd.events = self.mgr.events.get_for_daemon(dd.name()) + return dd + + for host, dm in self.daemons.copy().items(): + yield host, {name: alter(host, d) for name, d in dm.items()} + + def get_daemons_by_service(self, service_name): + # type: (str) -> List[orchestrator.DaemonDescription] + assert not service_name.startswith('keepalived.') + assert not service_name.startswith('haproxy.') + + return list(dd for dd in self._get_daemons() if dd.service_name() == service_name) + + def get_related_service_daemons(self, service_spec: ServiceSpec) -> Optional[List[orchestrator.DaemonDescription]]: + if service_spec.service_type == 'ingress': + dds = list(dd for dd in self._get_daemons() if dd.service_name() == cast(IngressSpec, service_spec).backend_service) + dds += list(dd for dd in self._get_tmp_daemons() if dd.service_name() == cast(IngressSpec, service_spec).backend_service) + logger.debug(f'Found related daemons {dds} for service {service_spec.service_name()}') + return dds + else: + for ingress_spec in [cast(IngressSpec, s) for s in self.mgr.spec_store.active_specs.values() if s.service_type == 'ingress']: + if ingress_spec.backend_service == service_spec.service_name(): + dds = list(dd for dd in self._get_daemons() if dd.service_name() == ingress_spec.service_name()) + dds += list(dd for dd in self._get_tmp_daemons() if dd.service_name() == ingress_spec.service_name()) + logger.debug(f'Found related daemons {dds} for service {service_spec.service_name()}') + return dds + return None + + def get_daemons_by_type(self, service_type: str, host: str = '') -> List[orchestrator.DaemonDescription]: + assert service_type not in ['keepalived', 'haproxy'] + + daemons = self.daemons[host].values() if host else self._get_daemons() + + return [d for d in daemons if d.daemon_type in service_to_daemon_types(service_type)] + + def get_daemon_types(self, hostname: str) -> Set[str]: + """Provide a list of the types of daemons on the host""" + return cast(Set[str], {d.daemon_type for d in self.daemons[hostname].values()}) + + def get_daemon_names(self): + # type: () -> List[str] + return [d.name() for d in self._get_daemons()] + + def get_daemon_last_config_deps(self, host: str, name: str) -> Tuple[Optional[List[str]], Optional[datetime.datetime]]: + if host in self.daemon_config_deps: + if name in self.daemon_config_deps[host]: + return self.daemon_config_deps[host][name].get('deps', []), \ + self.daemon_config_deps[host][name].get('last_config', None) + return None, None + + def get_host_client_files(self, host: str) -> Dict[str, Tuple[str, int, int, int]]: + return self.last_client_files.get(host, {}) + + def host_needs_daemon_refresh(self, host): + # type: (str) -> bool + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. 
Skipping daemon refresh') + return False + if host in self.daemon_refresh_queue: + self.daemon_refresh_queue.remove(host) + return True + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.daemon_cache_timeout) + if host not in self.last_daemon_update or self.last_daemon_update[host] < cutoff: + return True + if not self.mgr.cache.host_metadata_up_to_date(host): + return True + return False + + def host_needs_facts_refresh(self, host): + # type: (str) -> bool + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Skipping gather facts refresh') + return False + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.facts_cache_timeout) + if host not in self.last_facts_update or self.last_facts_update[host] < cutoff: + return True + if not self.mgr.cache.host_metadata_up_to_date(host): + return True + return False + + def host_needs_autotune_memory(self, host): + # type: (str) -> bool + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Skipping autotune') + return False + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.autotune_interval) + if host not in self.last_autotune or self.last_autotune[host] < cutoff: + return True + return False + + def host_needs_tuned_profile_update(self, host: str, profile: str) -> bool: + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Cannot apply tuned profile') + return False + if profile not in self.mgr.tuned_profiles: + logger.debug( + f'Cannot apply tuned profile {profile} on host {host}. Profile does not exist') + return False + if host not in self.last_tuned_profile_update: + return True + last_profile_update = self.mgr.tuned_profiles.last_updated(profile) + if last_profile_update is None: + self.mgr.tuned_profiles.set_last_updated(profile, datetime_now()) + return True + if self.last_tuned_profile_update[host] < last_profile_update: + return True + return False + + def host_had_daemon_refresh(self, host: str) -> bool: + """ + ... at least once. + """ + if host in self.last_daemon_update: + return True + if host not in self.daemons: + return False + return bool(self.daemons[host]) + + def host_needs_device_refresh(self, host): + # type: (str) -> bool + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Skipping device refresh') + return False + if host in self.device_refresh_queue: + self.device_refresh_queue.remove(host) + return True + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.device_cache_timeout) + if host not in self.last_device_update or self.last_device_update[host] < cutoff: + return True + if not self.mgr.cache.host_metadata_up_to_date(host): + return True + return False + + def host_needs_network_refresh(self, host): + # type: (str) -> bool + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Skipping network refresh') + return False + if host in self.network_refresh_queue: + self.network_refresh_queue.remove(host) + return True + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.device_cache_timeout) + if host not in self.last_network_update or self.last_network_update[host] < cutoff: + return True + if not self.mgr.cache.host_metadata_up_to_date(host): + return True + return False + + def host_needs_osdspec_preview_refresh(self, host: str) -> bool: + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. 
Skipping osdspec preview refresh') + return False + if host in self.osdspec_previews_refresh_queue: + self.osdspec_previews_refresh_queue.remove(host) + return True + # Since this is dependent on other factors (device and spec) this does not need + # to be updated periodically. + return False + + def host_needs_check(self, host): + # type: (str) -> bool + cutoff = datetime_now() - datetime.timedelta( + seconds=self.mgr.host_check_interval) + return host not in self.last_host_check or self.last_host_check[host] < cutoff + + def osdspec_needs_apply(self, host: str, spec: ServiceSpec) -> bool: + if ( + host not in self.devices + or host not in self.last_device_change + or host not in self.last_device_update + or host not in self.osdspec_last_applied + or spec.service_name() not in self.osdspec_last_applied[host] + ): + return True + created = self.mgr.spec_store.get_created(spec) + if not created or created > self.last_device_change[host]: + return True + return self.osdspec_last_applied[host][spec.service_name()] < self.last_device_change[host] + + def host_needs_registry_login(self, host: str) -> bool: + if host in self.mgr.offline_hosts: + return False + if host in self.registry_login_queue: + self.registry_login_queue.remove(host) + return True + return False + + def host_metadata_up_to_date(self, host: str) -> bool: + if host not in self.metadata_up_to_date or not self.metadata_up_to_date[host]: + return False + return True + + def all_host_metadata_up_to_date(self) -> bool: + if [h for h in self.get_hosts() if (not self.host_metadata_up_to_date(h) and not self.is_host_unreachable(h))]: + # this function is primarily for telling if it's safe to try and apply a service + # spec. Since offline/maintenance hosts aren't considered in that process anyway + # we don't want to return False if the host without up-to-date metadata is in one + # of those two categories. + return False + return True + + def add_daemon(self, host, dd): + # type: (str, orchestrator.DaemonDescription) -> None + assert host in self.daemons + self.daemons[host][dd.name()] = dd + + def rm_daemon(self, host: str, name: str) -> None: + assert not name.startswith('ha-rgw.') + + if host in self.daemons: + if name in self.daemons[host]: + del self.daemons[host][name] + + def daemon_cache_filled(self) -> bool: + """ + i.e. we have checked the daemons for each host at least once, + excluding offline hosts. + + We're not checking for `host_needs_daemon_refresh`, as this might never be + False for all hosts.
+ """ + return all((self.host_had_daemon_refresh(h) or h in self.mgr.offline_hosts) + for h in self.get_hosts()) + + def schedule_daemon_action(self, host: str, daemon_name: str, action: str) -> None: + assert not daemon_name.startswith('ha-rgw.') + + priorities = { + 'start': 1, + 'restart': 2, + 'reconfig': 3, + 'redeploy': 4, + 'stop': 5, + 'rotate-key': 6, + } + existing_action = self.scheduled_daemon_actions.get(host, {}).get(daemon_name, None) + if existing_action and priorities[existing_action] > priorities[action]: + logger.debug( + f'skipping {action}ing {daemon_name}, cause {existing_action} already scheduled.') + return + + if host not in self.scheduled_daemon_actions: + self.scheduled_daemon_actions[host] = {} + self.scheduled_daemon_actions[host][daemon_name] = action + + def rm_scheduled_daemon_action(self, host: str, daemon_name: str) -> bool: + found = False + if host in self.scheduled_daemon_actions: + if daemon_name in self.scheduled_daemon_actions[host]: + del self.scheduled_daemon_actions[host][daemon_name] + found = True + if not self.scheduled_daemon_actions[host]: + del self.scheduled_daemon_actions[host] + return found + + def get_scheduled_daemon_action(self, host: str, daemon: str) -> Optional[str]: + assert not daemon.startswith('ha-rgw.') + + return self.scheduled_daemon_actions.get(host, {}).get(daemon) + + +class AgentCache(): + """ + AgentCache is used for storing metadata about agent daemons that must be kept + through MGR failovers + """ + + def __init__(self, mgr): + # type: (CephadmOrchestrator) -> None + self.mgr: CephadmOrchestrator = mgr + self.agent_config_deps = {} # type: Dict[str, Dict[str,Any]] + self.agent_counter = {} # type: Dict[str, int] + self.agent_timestamp = {} # type: Dict[str, datetime.datetime] + self.agent_keys = {} # type: Dict[str, str] + self.agent_ports = {} # type: Dict[str, int] + self.sending_agent_message = {} # type: Dict[str, bool] + + def load(self): + # type: () -> None + for k, v in self.mgr.get_store_prefix(AGENT_CACHE_PREFIX).items(): + host = k[len(AGENT_CACHE_PREFIX):] + if host not in self.mgr.inventory: + self.mgr.log.warning('removing stray AgentCache record for agent on %s' % ( + host)) + self.mgr.set_store(k, None) + try: + j = json.loads(v) + self.agent_config_deps[host] = {} + conf_deps = j.get('agent_config_deps', {}) + if conf_deps: + conf_deps['last_config'] = str_to_datetime(conf_deps['last_config']) + self.agent_config_deps[host] = conf_deps + self.agent_counter[host] = int(j.get('agent_counter', 1)) + self.agent_timestamp[host] = str_to_datetime( + j.get('agent_timestamp', datetime_to_str(datetime_now()))) + self.agent_keys[host] = str(j.get('agent_keys', '')) + agent_port = int(j.get('agent_ports', 0)) + if agent_port: + self.agent_ports[host] = agent_port + + except Exception as e: + self.mgr.log.warning('unable to load cached state for agent on host %s: %s' % ( + host, e)) + pass + + def save_agent(self, host: str) -> None: + j: Dict[str, Any] = {} + if host in self.agent_config_deps: + j['agent_config_deps'] = { + 'deps': self.agent_config_deps[host].get('deps', []), + 'last_config': datetime_to_str(self.agent_config_deps[host]['last_config']), + } + if host in self.agent_counter: + j['agent_counter'] = self.agent_counter[host] + if host in self.agent_keys: + j['agent_keys'] = self.agent_keys[host] + if host in self.agent_ports: + j['agent_ports'] = self.agent_ports[host] + if host in self.agent_timestamp: + j['agent_timestamp'] = datetime_to_str(self.agent_timestamp[host]) + + 
self.mgr.set_store(AGENT_CACHE_PREFIX + host, json.dumps(j)) + + def update_agent_config_deps(self, host: str, deps: List[str], stamp: datetime.datetime) -> None: + self.agent_config_deps[host] = { + 'deps': deps, + 'last_config': stamp, + } + + def get_agent_last_config_deps(self, host: str) -> Tuple[Optional[List[str]], Optional[datetime.datetime]]: + if host in self.agent_config_deps: + return self.agent_config_deps[host].get('deps', []), \ + self.agent_config_deps[host].get('last_config', None) + return None, None + + def messaging_agent(self, host: str) -> bool: + if host not in self.sending_agent_message or not self.sending_agent_message[host]: + return False + return True + + def agent_config_successfully_delivered(self, daemon_spec: CephadmDaemonDeploySpec) -> None: + # agent successfully received new config. Update config/deps + assert daemon_spec.service_name == 'agent' + self.update_agent_config_deps( + daemon_spec.host, daemon_spec.deps, datetime_now()) + self.agent_timestamp[daemon_spec.host] = datetime_now() + self.agent_counter[daemon_spec.host] = 1 + self.save_agent(daemon_spec.host) + + +class EventStore(): + def __init__(self, mgr): + # type: (CephadmOrchestrator) -> None + self.mgr: CephadmOrchestrator = mgr + self.events = {} # type: Dict[str, List[OrchestratorEvent]] + + def add(self, event: OrchestratorEvent) -> None: + + if event.kind_subject() not in self.events: + self.events[event.kind_subject()] = [event] + + for e in self.events[event.kind_subject()]: + if e.message == event.message: + return + + self.events[event.kind_subject()].append(event) + + # limit to five events for now. + self.events[event.kind_subject()] = self.events[event.kind_subject()][-5:] + + def for_service(self, spec: ServiceSpec, level: str, message: str) -> None: + e = OrchestratorEvent(datetime_now(), 'service', + spec.service_name(), level, message) + self.add(e) + + def from_orch_error(self, e: OrchestratorError) -> None: + if e.event_subject is not None: + self.add(OrchestratorEvent( + datetime_now(), + e.event_subject[0], + e.event_subject[1], + "ERROR", + str(e) + )) + + def for_daemon(self, daemon_name: str, level: str, message: str) -> None: + e = OrchestratorEvent(datetime_now(), 'daemon', daemon_name, level, message) + self.add(e) + + def for_daemon_from_exception(self, daemon_name: str, e: Exception) -> None: + self.for_daemon( + daemon_name, + "ERROR", + str(e) + ) + + def cleanup(self) -> None: + # Needs to be properly done, in case events are persistently stored. 
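# [editor's note] Toy model of EventStore.add() above: events are deduplicated
# by message per subject, and only the five most recent are kept. This is an
# illustrative re-implementation, not the class itself.
from collections import defaultdict

events = defaultdict(list)

def add(subject: str, message: str) -> None:
    if any(m == message for m in events[subject]):
        return  # duplicate message for this subject; drop it
    events[subject].append(message)
    events[subject] = events[subject][-5:]  # cap history at five entries

for i in range(7):
    add('daemon:osd.0', f'event {i}')
add('daemon:osd.0', 'event 6')  # duplicate, ignored
assert events['daemon:osd.0'] == [f'event {i}' for i in range(2, 7)]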
+ + unknowns: List[str] = [] + daemons = self.mgr.cache.get_daemon_names() + specs = self.mgr.spec_store.all_specs.keys() + for k_s, v in self.events.items(): + kind, subject = k_s.split(':') + if kind == 'service': + if subject not in specs: + unknowns.append(k_s) + elif kind == 'daemon': + if subject not in daemons: + unknowns.append(k_s) + + for k_s in unknowns: + del self.events[k_s] + + def get_for_service(self, name: str) -> List[OrchestratorEvent]: + return self.events.get('service:' + name, []) + + def get_for_daemon(self, name: str) -> List[OrchestratorEvent]: + return self.events.get('daemon:' + name, []) diff --git a/src/pybind/mgr/cephadm/migrations.py b/src/pybind/mgr/cephadm/migrations.py new file mode 100644 index 000000000..27f777af6 --- /dev/null +++ b/src/pybind/mgr/cephadm/migrations.py @@ -0,0 +1,441 @@ +import json +import re +import logging +from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any, List + +from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec +from cephadm.schedule import HostAssignment +from cephadm.utils import SpecialHostLabels +import rados + +from mgr_module import NFS_POOL_NAME +from orchestrator import OrchestratorError, DaemonDescription + +if TYPE_CHECKING: + from .module import CephadmOrchestrator + +LAST_MIGRATION = 6 + +logger = logging.getLogger(__name__) + + +class Migrations: + def __init__(self, mgr: "CephadmOrchestrator"): + self.mgr = mgr + + # Why have a global counter instead of spec versions? + # + # for the first migration: + # The specs don't change in (this) migration, but the scheduler does. + # Adding the version to the specs at this time just felt wrong to me. + # + # And the specs are only another part of cephadm which needs potential upgrades. + # We have the cache, the inventory, the config store, the upgrade (imagine changing the + # upgrade code, while an old upgrade is still in progress), naming of daemons, + # fs-layout of the daemons, etc. + self.set_sane_migration_current() + + v = mgr.get_store('nfs_migration_queue') + self.nfs_migration_queue = json.loads(v) if v else [] + + r = mgr.get_store('rgw_migration_queue') + self.rgw_migration_queue = json.loads(r) if r else [] + + # for some migrations, we don't need to do anything except for + # incrementing migration_current. + # let's try to shortcut things here. + self.migrate(True) + + def set(self, val: int) -> None: + self.mgr.set_module_option('migration_current', val) + self.mgr.migration_current = val + + def set_sane_migration_current(self) -> None: + # migration current should always be an integer + # between 0 and LAST_MIGRATION (inclusive) in order to + # actually carry out migration. If we find + # it is None or too high a value here, we should + # set it to some sane value + mc: Optional[int] = self.mgr.migration_current + if mc is None: + logger.info('Found migration_current of "None". Setting to last migration.') + self.set(LAST_MIGRATION) + return + + if mc > LAST_MIGRATION: + logger.error(f'Found migration_current of {mc} when max should be {LAST_MIGRATION}. Setting back to 0.') + # something has gone wrong and caused migration_current + # to be higher than it should be able to be.
Best option + # we have here is to just set it back to 0 + self.set(0) + + def is_migration_ongoing(self) -> bool: + self.set_sane_migration_current() + mc: Optional[int] = self.mgr.migration_current + return mc is None or mc < LAST_MIGRATION + + def verify_no_migration(self) -> None: + if self.is_migration_ongoing(): + # this is raised in module.serve() + raise OrchestratorError( + "cephadm migration still ongoing. Please wait, until the migration is complete.") + + def migrate(self, startup: bool = False) -> None: + if self.mgr.migration_current == 0: + if self.migrate_0_1(): + self.set(1) + + if self.mgr.migration_current == 1: + if self.migrate_1_2(): + self.set(2) + + if self.mgr.migration_current == 2 and not startup: + if self.migrate_2_3(): + self.set(3) + + if self.mgr.migration_current == 3: + if self.migrate_3_4(): + self.set(4) + + if self.mgr.migration_current == 4: + if self.migrate_4_5(): + self.set(5) + + if self.mgr.migration_current == 5: + if self.migrate_5_6(): + self.set(6) + + def migrate_0_1(self) -> bool: + """ + Migration 0 -> 1 + New scheduler that takes PlacementSpec as the bound and not as recommendation. + I.e. the new scheduler won't suggest any new placements outside of the hosts + specified by label etc. + + Which means, we have to make sure, we're not removing any daemons directly after + upgrading to the new scheduler. + + There is a potential race here: + 1. user updates his spec to remove daemons + 2. mgr gets upgraded to new scheduler, before the old scheduler removed the daemon + 3. now, we're converting the spec to explicit placement, thus reverting (1.) + I think this is ok. + """ + + def interesting_specs() -> Iterator[ServiceSpec]: + for s in self.mgr.spec_store.all_specs.values(): + if s.unmanaged: + continue + p = s.placement + if p is None: + continue + if p.count is None: + continue + if not p.hosts and not p.host_pattern and not p.label: + continue + yield s + + def convert_to_explicit(spec: ServiceSpec) -> None: + existing_daemons = self.mgr.cache.get_daemons_by_service(spec.service_name()) + placements, to_add, to_remove = HostAssignment( + spec=spec, + hosts=self.mgr.inventory.all_specs(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), + daemons=existing_daemons, + ).place() + + # We have to migrate, only if the new scheduler would remove daemons + if len(placements) >= len(existing_daemons): + return + + def to_hostname(d: DaemonDescription) -> HostPlacementSpec: + if d.hostname in old_hosts: + return old_hosts[d.hostname] + else: + assert d.hostname + return HostPlacementSpec(d.hostname, '', '') + + old_hosts = {h.hostname: h for h in spec.placement.hosts} + new_hosts = [to_hostname(d) for d in existing_daemons] + + new_placement = PlacementSpec( + hosts=new_hosts, + count=spec.placement.count + ) + + new_spec = ServiceSpec.from_json(spec.to_json()) + new_spec.placement = new_placement + + logger.info(f"Migrating {spec.one_line_str()} to explicit placement") + + self.mgr.spec_store.save(new_spec) + + specs = list(interesting_specs()) + if not specs: + return True # nothing to do. shortcut + + if not self.mgr.cache.daemon_cache_filled(): + logger.info("Unable to migrate yet. Daemon Cache still incomplete.") + return False + + for spec in specs: + convert_to_explicit(spec) + + return True + + def migrate_1_2(self) -> bool: + """ + After 15.2.4, we unified some service IDs: MONs, MGRs etc no longer have a service id. 
+ This means the service names changed: + + mon.foo -> mon + mgr.foo -> mgr + + This fixes the data structure consistency. + """ + bad_specs = {} + for name, spec in self.mgr.spec_store.all_specs.items(): + if name != spec.service_name(): + bad_specs[name] = (spec.service_name(), spec) + + for old, (new, old_spec) in bad_specs.items(): + if new not in self.mgr.spec_store.all_specs: + spec = old_spec + else: + spec = self.mgr.spec_store.all_specs[new] + spec.unmanaged = True + self.mgr.spec_store.save(spec) + self.mgr.spec_store.finally_rm(old) + + return True + + def migrate_2_3(self) -> bool: + if self.nfs_migration_queue: + from nfs.cluster import create_ganesha_pool + + create_ganesha_pool(self.mgr) + for service_id, pool, ns in self.nfs_migration_queue: + if pool != '.nfs': + self.migrate_nfs_spec(service_id, pool, ns) + self.nfs_migration_queue = [] + self.mgr.log.info('Done migrating all NFS services') + return True + + def migrate_nfs_spec(self, service_id: str, pool: str, ns: Optional[str]) -> None: + renamed = False + if service_id.startswith('ganesha-'): + service_id = service_id[8:] + renamed = True + + self.mgr.log.info( + f'Migrating nfs.{service_id} from legacy pool {pool} namespace {ns}' + ) + + # read exports + ioctx = self.mgr.rados.open_ioctx(pool) + if ns is not None: + ioctx.set_namespace(ns) + object_iterator = ioctx.list_objects() + exports = [] + while True: + try: + obj = object_iterator.__next__() + if obj.key.startswith('export-'): + self.mgr.log.debug(f'reading {obj.key}') + exports.append(obj.read().decode()) + except StopIteration: + break + self.mgr.log.info(f'Found {len(exports)} exports for legacy nfs.{service_id}') + + # copy grace file + if service_id != ns: + try: + grace = ioctx.read("grace") + new_ioctx = self.mgr.rados.open_ioctx(NFS_POOL_NAME) + new_ioctx.set_namespace(service_id) + new_ioctx.write_full("grace", grace) + self.mgr.log.info('Migrated nfs-ganesha grace file') + except rados.ObjectNotFound: + self.mgr.log.debug('failed to read old grace file; skipping') + + if renamed and f'nfs.ganesha-{service_id}' in self.mgr.spec_store: + # rename from nfs.ganesha-* to nfs.*. This will destroy old daemons and + # deploy new ones. + self.mgr.log.info(f'Replacing nfs.ganesha-{service_id} with nfs.{service_id}') + spec = self.mgr.spec_store[f'nfs.ganesha-{service_id}'].spec + self.mgr.spec_store.rm(f'nfs.ganesha-{service_id}') + spec.service_id = service_id + self.mgr.spec_store.save(spec, True) + + # We have to remove the old daemons here as well, otherwise we'll end up with a port conflict. + daemons = [d.name() + for d in self.mgr.cache.get_daemons_by_service(f'nfs.ganesha-{service_id}')] + self.mgr.log.info(f'Removing old nfs.ganesha-{service_id} daemons {daemons}') + self.mgr.remove_daemons(daemons) + else: + # redeploy all ganesha daemons to ensure that the daemon + # cephx keys are correct AND container configs are set up properly + daemons = [d.name() for d in self.mgr.cache.get_daemons_by_service(f'nfs.{service_id}')] + self.mgr.log.info(f'Removing old nfs.{service_id} daemons {daemons}') + self.mgr.remove_daemons(daemons) + + # re-save service spec (without pool and namespace properties!)
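# [editor's note] Sketch of the legacy-name handling in migrate_nfs_spec()
# above: older NFS services were named "ganesha-<id>"; the migration strips
# that prefix and remembers that it did, so the matching spec can be re-keyed
# from nfs.ganesha-<id> to nfs.<id>. Illustrative only.
def normalize_nfs_service_id(service_id: str):
    renamed = False
    if service_id.startswith('ganesha-'):
        service_id = service_id[len('ganesha-'):]
        renamed = True
    return service_id, renamed

assert normalize_nfs_service_id('ganesha-foo') == ('foo', True)
assert normalize_nfs_service_id('foo') == ('foo', False)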
+ spec = self.mgr.spec_store[f'nfs.{service_id}'].spec + self.mgr.spec_store.save(spec) + + # import exports + for export in exports: + ex = '' + for line in export.splitlines(): + if ( + line.startswith(' secret_access_key =') + or line.startswith(' user_id =') + ): + continue + ex += line + '\n' + self.mgr.log.debug(f'importing export: {ex}') + ret, out, err = self.mgr.mon_command({ + 'prefix': 'nfs export apply', + 'cluster_id': service_id + }, inbuf=ex) + if ret: + self.mgr.log.warning(f'Failed to migrate export ({ret}): {err}\nExport was:\n{ex}') + self.mgr.log.info(f'Done migrating nfs.{service_id}') + + def migrate_3_4(self) -> bool: + # We can't set any host with the _admin label, but we're + # going to warn when calling `ceph orch host rm...` + if 'client.admin' not in self.mgr.keys.keys: + self.mgr._client_keyring_set( + entity='client.admin', + placement=f'label:{SpecialHostLabels.ADMIN}', + ) + return True + + def migrate_4_5(self) -> bool: + registry_url = self.mgr.get_module_option('registry_url') + registry_username = self.mgr.get_module_option('registry_username') + registry_password = self.mgr.get_module_option('registry_password') + if registry_url and registry_username and registry_password: + + registry_credentials = {'url': registry_url, + 'username': registry_username, 'password': registry_password} + self.mgr.set_store('registry_credentials', json.dumps(registry_credentials)) + + self.mgr.set_module_option('registry_url', None) + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': 'mgr', + 'key': 'mgr/cephadm/registry_url', + }) + self.mgr.set_module_option('registry_username', None) + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': 'mgr', + 'key': 'mgr/cephadm/registry_username', + }) + self.mgr.set_module_option('registry_password', None) + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': 'mgr', + 'key': 'mgr/cephadm/registry_password', + }) + + self.mgr.log.info('Done migrating registry login info') + return True + + def migrate_rgw_spec(self, spec: Dict[Any, Any]) -> Optional[RGWSpec]: + """ Migrate an old rgw spec to the new format.""" + new_spec = spec.copy() + field_content: List[str] = re.split(' +', new_spec['spec']['rgw_frontend_type']) + valid_spec = False + if 'beast' in field_content: + new_spec['spec']['rgw_frontend_type'] = 'beast' + field_content.remove('beast') + valid_spec = True + elif 'civetweb' in field_content: + new_spec['spec']['rgw_frontend_type'] = 'civetweb' + field_content.remove('civetweb') + valid_spec = True + else: + # Error: Should not happen as that would be an invalid RGW spec. In that case + # we keep the spec as is, mark it as unmanaged to avoid the daemons being deleted + # and raise a health warning so the user can fix the issue manually later. + self.mgr.log.error(f"Cannot migrate RGW spec, bad rgw_frontend_type value: {spec['spec']['rgw_frontend_type']}.") + + if valid_spec: + new_spec['spec']['rgw_frontend_extra_args'] = [] + new_spec['spec']['rgw_frontend_extra_args'].extend(field_content) + + return RGWSpec.from_json(new_spec) + + def rgw_spec_needs_migration(self, spec: Dict[Any, Any]) -> bool: + if 'spec' not in spec: + # if users allowed cephadm to set up most of the + # attributes, it's possible there is no "spec" section + # inside the spec.
In that case, no migration is needed + return False + return 'rgw_frontend_type' in spec['spec'] \ + and spec['spec']['rgw_frontend_type'] is not None \ + and spec['spec']['rgw_frontend_type'].strip() not in ['beast', 'civetweb'] + + def migrate_5_6(self) -> bool: + """ + Migration 5 -> 6 + + Old RGW spec used to allow 'bad' values on the rgw_frontend_type field. For example + the following value used to be valid: + + rgw_frontend_type: "beast endpoint=10.16.96.54:8043 tcp_nodelay=1" + + As of the 17.2.6 release, these kinds of entries are no longer valid and a stricter check + has been added to validate this field. + + This migration logic detects these 'bad' values and tries to transform them to the new + valid format where the rgw_frontend_type field can only be either 'beast' or 'civetweb'. + Any extra arguments detected on the rgw_frontend_type field will be parsed and passed in the + new spec field rgw_frontend_extra_args. + """ + self.mgr.log.debug(f'Starting rgw migration (queue length is {len(self.rgw_migration_queue)})') + for s in self.rgw_migration_queue: + spec = s['spec'] + if self.rgw_spec_needs_migration(spec): + rgw_spec = self.migrate_rgw_spec(spec) + if rgw_spec is not None: + logger.info(f"Migrating {spec} to new RGW with extra args format {rgw_spec}") + self.mgr.spec_store.save(rgw_spec) + else: + logger.info(f"No migration is needed for rgw spec: {spec}") + self.rgw_migration_queue = [] + return True + + +def queue_migrate_rgw_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None: + """ + As part of 17.2.6 a stricter RGW spec validation has been added so the field + rgw_frontend_type cannot be used to pass rgw-frontends parameters. + """ + service_id = spec_dict['spec']['service_id'] + queued = mgr.get_store('rgw_migration_queue') or '[]' + ls = json.loads(queued) + ls.append(spec_dict) + mgr.set_store('rgw_migration_queue', json.dumps(ls)) + mgr.log.info(f'Queued rgw.{service_id} for migration') + + +def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None: + """ + After 16.2.5 we dropped the NFSServiceSpec pool and namespace properties. + Queue up a migration to process later, once we are sure that RADOS is available + and so on.
+ """ + service_id = spec_dict['spec']['service_id'] + args = spec_dict['spec'].get('spec', {}) + pool = args.pop('pool', 'nfs-ganesha') + ns = args.pop('namespace', service_id) + queued = mgr.get_store('nfs_migration_queue') or '[]' + ls = json.loads(queued) + ls.append([service_id, pool, ns]) + mgr.set_store('nfs_migration_queue', json.dumps(ls)) + mgr.log.info(f'Queued nfs.{service_id} for migration') diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py new file mode 100644 index 000000000..7b97ce74a --- /dev/null +++ b/src/pybind/mgr/cephadm/module.py @@ -0,0 +1,3405 @@ +import asyncio +import json +import errno +import ipaddress +import logging +import re +import shlex +from collections import defaultdict +from configparser import ConfigParser +from contextlib import contextmanager +from functools import wraps +from tempfile import TemporaryDirectory, NamedTemporaryFile +from threading import Event + +from cephadm.service_discovery import ServiceDiscovery + +import string +from typing import List, Dict, Optional, Callable, Tuple, TypeVar, \ + Any, Set, TYPE_CHECKING, cast, NamedTuple, Sequence, Type, \ + Awaitable, Iterator + +import datetime +import os +import random +import multiprocessing.pool +import subprocess +from prettytable import PrettyTable + +from ceph.deployment import inventory +from ceph.deployment.drive_group import DriveGroupSpec +from ceph.deployment.service_spec import \ + ServiceSpec, PlacementSpec, \ + HostPlacementSpec, IngressSpec, \ + TunedProfileSpec, IscsiServiceSpec +from ceph.utils import str_to_datetime, datetime_to_str, datetime_now +from cephadm.serve import CephadmServe +from cephadm.services.cephadmservice import CephadmDaemonDeploySpec +from cephadm.http_server import CephadmHttpServer +from cephadm.agent import CephadmAgentHelpers + + +from mgr_module import MgrModule, HandleCommandResult, Option, NotifyType +import orchestrator +from orchestrator.module import to_format, Format + +from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \ + CLICommandMeta, DaemonDescription, DaemonDescriptionStatus, handle_orch_error, \ + service_to_daemon_types +from orchestrator._interface import GenericSpec +from orchestrator._interface import daemon_type_to_service + +from . import utils +from . 
import ssh +from .migrations import Migrations +from .services.cephadmservice import MonService, MgrService, MdsService, RgwService, \ + RbdMirrorService, CrashService, CephadmService, CephfsMirrorService, CephadmAgent, \ + CephExporterService +from .services.ingress import IngressService +from .services.container import CustomContainerService +from .services.iscsi import IscsiService +from .services.nvmeof import NvmeofService +from .services.nfs import NFSService +from .services.osd import OSDRemovalQueue, OSDService, OSD, NotFoundError +from .services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \ + NodeExporterService, SNMPGatewayService, LokiService, PromtailService +from .services.jaeger import ElasticSearchService, JaegerAgentService, JaegerCollectorService, JaegerQueryService +from .schedule import HostAssignment +from .inventory import Inventory, SpecStore, HostCache, AgentCache, EventStore, \ + ClientKeyringStore, ClientKeyringSpec, TunedProfileStore +from .upgrade import CephadmUpgrade +from .template import TemplateMgr +from .utils import CEPH_IMAGE_TYPES, RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES, forall_hosts, \ + cephadmNoImage, CEPH_UPGRADE_ORDER, SpecialHostLabels +from .configchecks import CephadmConfigChecks +from .offline_watcher import OfflineHostWatcher +from .tuned_profiles import TunedProfileUtils + +try: + import asyncssh +except ImportError as e: + asyncssh = None # type: ignore + asyncssh_import_error = str(e) + +logger = logging.getLogger(__name__) + +T = TypeVar('T') + +DEFAULT_SSH_CONFIG = """ +Host * + User root + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + ConnectTimeout=30 +""" + +# cherrypy likes to sys.exit on error. don't let it take us down too! + + +def os_exit_noop(status: int) -> None: + pass + + +os._exit = os_exit_noop # type: ignore + + +# Default container images ----------------------------------------------------- +DEFAULT_IMAGE = 'quay.io/ceph/ceph' # DO NOT ADD TAG TO THIS +DEFAULT_PROMETHEUS_IMAGE = 'quay.io/prometheus/prometheus:v2.43.0' +DEFAULT_NODE_EXPORTER_IMAGE = 'quay.io/prometheus/node-exporter:v1.5.0' +DEFAULT_NVMEOF_IMAGE = 'quay.io/ceph/nvmeof:0.0.2' +DEFAULT_LOKI_IMAGE = 'docker.io/grafana/loki:2.4.0' +DEFAULT_PROMTAIL_IMAGE = 'docker.io/grafana/promtail:2.4.0' +DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.25.0' +DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:9.4.7' +DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' +DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.2.4' +DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' +DEFAULT_ELASTICSEARCH_IMAGE = 'quay.io/omrizeneva/elasticsearch:6.8.23' +DEFAULT_JAEGER_COLLECTOR_IMAGE = 'quay.io/jaegertracing/jaeger-collector:1.29' +DEFAULT_JAEGER_AGENT_IMAGE = 'quay.io/jaegertracing/jaeger-agent:1.29' +DEFAULT_JAEGER_QUERY_IMAGE = 'quay.io/jaegertracing/jaeger-query:1.29' +# ------------------------------------------------------------------------------ + + +def host_exists(hostname_position: int = 1) -> Callable: + """Check that a hostname exists in the inventory""" + def inner(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + this = args[0] # self object + hostname = args[hostname_position] + if hostname not in this.cache.get_hosts(): + candidates = ','.join([h for h in this.cache.get_hosts() if h.startswith(hostname)]) + help_msg = f"Did you mean {candidates}?" if candidates else "" + raise OrchestratorError( + f"Cannot find host '{hostname}' in the inventory. 
{help_msg}") + + return func(*args, **kwargs) + return wrapper + return inner + + +class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, + metaclass=CLICommandMeta): + + _STORE_HOST_PREFIX = "host" + + instance = None + NOTIFY_TYPES = [NotifyType.mon_map, NotifyType.pg_summary] + NATIVE_OPTIONS = [] # type: List[Any] + MODULE_OPTIONS = [ + Option( + 'ssh_config_file', + type='str', + default=None, + desc='customized SSH config file to connect to managed hosts', + ), + Option( + 'device_cache_timeout', + type='secs', + default=30 * 60, + desc='seconds to cache device inventory', + ), + Option( + 'device_enhanced_scan', + type='bool', + default=False, + desc='Use libstoragemgmt during device scans', + ), + Option( + 'inventory_list_all', + type='bool', + default=False, + desc='Whether ceph-volume inventory should report ' + 'more devices (mostly mappers (LVs / mpaths), partitions...)', + ), + Option( + 'daemon_cache_timeout', + type='secs', + default=10 * 60, + desc='seconds to cache service (daemon) inventory', + ), + Option( + 'facts_cache_timeout', + type='secs', + default=1 * 60, + desc='seconds to cache host facts data', + ), + Option( + 'host_check_interval', + type='secs', + default=10 * 60, + desc='how frequently to perform a host check', + ), + Option( + 'mode', + type='str', + enum_allowed=['root', 'cephadm-package'], + default='root', + desc='mode for remote execution of cephadm', + ), + Option( + 'container_image_base', + default=DEFAULT_IMAGE, + desc='Container image name, without the tag', + runtime=True, + ), + Option( + 'container_image_prometheus', + default=DEFAULT_PROMETHEUS_IMAGE, + desc='Prometheus container image', + ), + Option( + 'container_image_nvmeof', + default=DEFAULT_NVMEOF_IMAGE, + desc='Nvme-of container image', + ), + Option( + 'container_image_grafana', + default=DEFAULT_GRAFANA_IMAGE, + desc='Grafana container image', + ), + Option( + 'container_image_alertmanager', + default=DEFAULT_ALERT_MANAGER_IMAGE, + desc='Alertmanager container image', + ), + Option( + 'container_image_node_exporter', + default=DEFAULT_NODE_EXPORTER_IMAGE, + desc='Node exporter container image', + ), + Option( + 'container_image_loki', + default=DEFAULT_LOKI_IMAGE, + desc='Loki container image', + ), + Option( + 'container_image_promtail', + default=DEFAULT_PROMTAIL_IMAGE, + desc='Promtail container image', + ), + Option( + 'container_image_haproxy', + default=DEFAULT_HAPROXY_IMAGE, + desc='HAproxy container image', + ), + Option( + 'container_image_keepalived', + default=DEFAULT_KEEPALIVED_IMAGE, + desc='Keepalived container image', + ), + Option( + 'container_image_snmp_gateway', + default=DEFAULT_SNMP_GATEWAY_IMAGE, + desc='SNMP Gateway container image', + ), + Option( + 'container_image_elasticsearch', + default=DEFAULT_ELASTICSEARCH_IMAGE, + desc='Elasticsearch container image', + ), + Option( + 'container_image_jaeger_agent', + default=DEFAULT_JAEGER_AGENT_IMAGE, + desc='Jaeger agent container image', + ), + Option( + 'container_image_jaeger_collector', + default=DEFAULT_JAEGER_COLLECTOR_IMAGE, + desc='Jaeger collector container image', + ), + Option( + 'container_image_jaeger_query', + default=DEFAULT_JAEGER_QUERY_IMAGE, + desc='Jaeger query container image', + ), + Option( + 'warn_on_stray_hosts', + type='bool', + default=True, + desc='raise a health warning if daemons are detected on a host ' + 'that is not managed by cephadm', + ), + Option( + 'warn_on_stray_daemons', + type='bool', + default=True, + desc='raise a health warning if daemons are detected ' + 'that are
not managed by cephadm', + ), + Option( + 'warn_on_failed_host_check', + type='bool', + default=True, + desc='raise a health warning if the host check fails', + ), + Option( + 'log_to_cluster', + type='bool', + default=True, + desc='log to the "cephadm" cluster log channel', + ), + Option( + 'allow_ptrace', + type='bool', + default=False, + desc='allow SYS_PTRACE capability on ceph containers', + long_desc='The SYS_PTRACE capability is needed to attach to a ' + 'process with gdb or strace. Enabling this option ' + 'can allow debugging daemons that encounter problems ' + 'at runtime.', + ), + Option( + 'container_init', + type='bool', + default=True, + desc='Run podman/docker with `--init`' + ), + Option( + 'prometheus_alerts_path', + type='str', + default='/etc/prometheus/ceph/ceph_default_alerts.yml', + desc='location of alerts to include in prometheus deployments', + ), + Option( + 'migration_current', + type='int', + default=None, + desc='internal - do not modify', + # used to track spec and other data migrations. + ), + Option( + 'config_dashboard', + type='bool', + default=True, + desc='manage configs like API endpoints in Dashboard.' + ), + Option( + 'manage_etc_ceph_ceph_conf', + type='bool', + default=False, + desc='Manage and own /etc/ceph/ceph.conf on the hosts.', + ), + Option( + 'manage_etc_ceph_ceph_conf_hosts', + type='str', + default='*', + desc='PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf', + ), + # not used anymore + Option( + 'registry_url', + type='str', + default=None, + desc='Registry url for login purposes. This is not the default registry' + ), + Option( + 'registry_username', + type='str', + default=None, + desc='Custom repository username. Only used for logging into a registry.' + ), + Option( + 'registry_password', + type='str', + default=None, + desc='Custom repository password. Only used for logging into a registry.' + ), + #### + Option( + 'registry_insecure', + type='bool', + default=False, + desc='Registry is to be considered insecure (no TLS available). Only for development purposes.' + ), + Option( + 'use_repo_digest', + type='bool', + default=True, + desc='Automatically convert image tags to image digest. Make sure all daemons use the same image', + ), + Option( + 'config_checks_enabled', + type='bool', + default=False, + desc='Enable or disable the cephadm configuration analysis', + ), + Option( + 'default_registry', + type='str', + default='docker.io', + desc='Search-registry to which we should normalize unqualified image names.
' + 'This is not the default registry', + ), + Option( + 'max_count_per_host', + type='int', + default=10, + desc='max number of daemons per service per host', + ), + Option( + 'autotune_memory_target_ratio', + type='float', + default=.7, + desc='ratio of total system memory to divide amongst autotuned daemons' + ), + Option( + 'autotune_interval', + type='secs', + default=10 * 60, + desc='how frequently to autotune daemon memory' + ), + Option( + 'use_agent', + type='bool', + default=False, + desc='Use cephadm agent on each host to gather and send metadata' + ), + Option( + 'agent_refresh_rate', + type='secs', + default=20, + desc='How often agent on each host will try to gather and send metadata' + ), + Option( + 'agent_starting_port', + type='int', + default=4721, + desc='First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)' + ), + Option( + 'agent_down_multiplier', + type='float', + default=3.0, + desc='Multiplied by agent refresh rate to calculate how long agent must not report before being marked down' + ), + Option( + 'max_osd_draining_count', + type='int', + default=10, + desc='max number of osds that will be drained simultaneously when osds are removed' + ), + Option( + 'service_discovery_port', + type='int', + default=8765, + desc='cephadm service discovery port' + ), + Option( + 'cgroups_split', + type='bool', + default=True, + desc='Pass --cgroups=split when cephadm creates containers (currently podman only)' + ), + Option( + 'log_refresh_metadata', + type='bool', + default=False, + desc='Log all refresh metadata. Includes daemon, device, and host info collected regularly. Only has effect if logging at debug level' + ), + Option( + 'secure_monitoring_stack', + type='bool', + default=False, + desc='Enable TLS security for all the monitoring stack daemons' + ), + Option( + 'default_cephadm_command_timeout', + type='secs', + default=15 * 60, + desc='Default timeout applied to cephadm commands run directly on ' + 'the host (in seconds)' + ), + ] + + def __init__(self, *args: Any, **kwargs: Any): + super(CephadmOrchestrator, self).__init__(*args, **kwargs) + self._cluster_fsid: str = self.get('mon_map')['fsid'] + self.last_monmap: Optional[datetime.datetime] = None + + # for serve() + self.run = True + self.event = Event() + + self.ssh = ssh.SSHManager(self) + + if self.get_store('pause'): + self.paused = True + else: + self.paused = False + + # for mypy which does not run the code + if TYPE_CHECKING: + self.ssh_config_file = None # type: Optional[str] + self.device_cache_timeout = 0 + self.daemon_cache_timeout = 0 + self.facts_cache_timeout = 0 + self.host_check_interval = 0 + self.max_count_per_host = 0 + self.mode = '' + self.container_image_base = '' + self.container_image_prometheus = '' + self.container_image_nvmeof = '' + self.container_image_grafana = '' + self.container_image_alertmanager = '' + self.container_image_node_exporter = '' + self.container_image_loki = '' + self.container_image_promtail = '' + self.container_image_haproxy = '' + self.container_image_keepalived = '' + self.container_image_snmp_gateway = '' + self.container_image_elasticsearch = '' + self.container_image_jaeger_agent = '' + self.container_image_jaeger_collector = '' + self.container_image_jaeger_query = '' + self.warn_on_stray_hosts = True + self.warn_on_stray_daemons = True + self.warn_on_failed_host_check = True + self.allow_ptrace = False + self.container_init = True + self.prometheus_alerts_path = '' + self.migration_current: Optional[int] = None + 
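# [editor's note] How the agent-down threshold follows from the two options
# above: agent_refresh_rate (seconds) multiplied by agent_down_multiplier gives
# how long an agent may stay silent before being marked down. A standalone
# arithmetic sketch using the defaults (20s and 3.0), not the module's code.
agent_refresh_rate = 20        # seconds (default)
agent_down_multiplier = 3.0    # default

def agent_is_down(last_report_age: float) -> bool:
    # an agent counts as down once it has not reported for
    # refresh_rate * multiplier seconds (60s with the defaults)
    return last_report_age > agent_refresh_rate * agent_down_multiplier

assert not agent_is_down(45)   # 45s < 60s: still fine
assert agent_is_down(75)       # 75s > 60s: marked down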
self.config_dashboard = True + self.manage_etc_ceph_ceph_conf = True + self.manage_etc_ceph_ceph_conf_hosts = '*' + self.registry_url: Optional[str] = None + self.registry_username: Optional[str] = None + self.registry_password: Optional[str] = None + self.registry_insecure: bool = False + self.use_repo_digest = True + self.default_registry = '' + self.autotune_memory_target_ratio = 0.0 + self.autotune_interval = 0 + self.ssh_user: Optional[str] = None + self._ssh_options: Optional[str] = None + self.tkey = NamedTemporaryFile() + self.ssh_config_fname: Optional[str] = None + self.ssh_config: Optional[str] = None + self._temp_files: List = [] + self.ssh_key: Optional[str] = None + self.ssh_pub: Optional[str] = None + self.ssh_cert: Optional[str] = None + self.use_agent = False + self.agent_refresh_rate = 0 + self.agent_down_multiplier = 0.0 + self.agent_starting_port = 0 + self.service_discovery_port = 0 + self.secure_monitoring_stack = False + self.apply_spec_fails: List[Tuple[str, str]] = [] + self.max_osd_draining_count = 10 + self.device_enhanced_scan = False + self.inventory_list_all = False + self.cgroups_split = True + self.log_refresh_metadata = False + self.default_cephadm_command_timeout = 0 + + self.notify(NotifyType.mon_map, None) + self.config_notify() + + path = self.get_ceph_option('cephadm_path') + try: + assert isinstance(path, str) + with open(path, 'rb') as f: + self._cephadm = f.read() + except (IOError, TypeError) as e: + raise RuntimeError("unable to read cephadm at '%s': %s" % ( + path, str(e))) + + self.cephadm_binary_path = self._get_cephadm_binary_path() + + self._worker_pool = multiprocessing.pool.ThreadPool(10) + + self.ssh._reconfig_ssh() + + CephadmOrchestrator.instance = self + + self.upgrade = CephadmUpgrade(self) + + self.health_checks: Dict[str, dict] = {} + + self.inventory = Inventory(self) + + self.cache = HostCache(self) + self.cache.load() + + self.agent_cache = AgentCache(self) + self.agent_cache.load() + + self.to_remove_osds = OSDRemovalQueue(self) + self.to_remove_osds.load_from_store() + + self.spec_store = SpecStore(self) + self.spec_store.load() + + self.keys = ClientKeyringStore(self) + self.keys.load() + + self.tuned_profiles = TunedProfileStore(self) + self.tuned_profiles.load() + + self.tuned_profile_utils = TunedProfileUtils(self) + + # ensure the host lists are in sync + for h in self.inventory.keys(): + if h not in self.cache.daemons: + self.cache.prime_empty_host(h) + for h in self.cache.get_hosts(): + if h not in self.inventory: + self.cache.rm_host(h) + + # in-memory only. 
self.events = EventStore(self) + self.offline_hosts: Set[str] = set() + + self.migration = Migrations(self) + + _service_classes: Sequence[Type[CephadmService]] = [ + OSDService, NFSService, MonService, MgrService, MdsService, + RgwService, RbdMirrorService, GrafanaService, AlertmanagerService, + PrometheusService, NodeExporterService, LokiService, PromtailService, CrashService, IscsiService, + IngressService, CustomContainerService, CephfsMirrorService, NvmeofService, + CephadmAgent, CephExporterService, SNMPGatewayService, ElasticSearchService, + JaegerQueryService, JaegerAgentService, JaegerCollectorService + ] + + # https://github.com/python/mypy/issues/8993 + self.cephadm_services: Dict[str, CephadmService] = { + cls.TYPE: cls(self) for cls in _service_classes} # type: ignore + + self.mgr_service: MgrService = cast(MgrService, self.cephadm_services['mgr']) + self.osd_service: OSDService = cast(OSDService, self.cephadm_services['osd']) + self.iscsi_service: IscsiService = cast(IscsiService, self.cephadm_services['iscsi']) + self.nvmeof_service: NvmeofService = cast(NvmeofService, self.cephadm_services['nvmeof']) + + self.scheduled_async_actions: List[Callable] = [] + + self.template = TemplateMgr(self) + + self.requires_post_actions: Set[str] = set() + self.need_connect_dashboard_rgw = False + + self.config_checker = CephadmConfigChecks(self) + + self.http_server = CephadmHttpServer(self) + self.http_server.start() + self.agent_helpers = CephadmAgentHelpers(self) + if self.use_agent: + self.agent_helpers._apply_agent() + + self.offline_watcher = OfflineHostWatcher(self) + self.offline_watcher.start() + + def shutdown(self) -> None: + self.log.debug('shutdown') + self._worker_pool.close() + self._worker_pool.join() + self.http_server.shutdown() + self.offline_watcher.shutdown() + self.run = False + self.event.set() + + def _get_cephadm_service(self, service_type: str) -> CephadmService: + assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES + return self.cephadm_services[service_type] + + def _get_cephadm_binary_path(self) -> str: + import hashlib + m = hashlib.sha256() + m.update(self._cephadm) + return f'/var/lib/ceph/{self._cluster_fsid}/cephadm.{m.hexdigest()}' + + def _kick_serve_loop(self) -> None: + self.log.debug('_kick_serve_loop') + self.event.set() + + def serve(self) -> None: + """ + The main loop of cephadm. + + A command handler will typically change the declarative state + of cephadm. This loop will then attempt to apply this new state. + """ + # for ssh in serve + self.event_loop = ssh.EventLoopThread() + + serve = CephadmServe(self) + serve.serve() + + def wait_async(self, coro: Awaitable[T], timeout: Optional[int] = None) -> T: + if not timeout: + timeout = self.default_cephadm_command_timeout + # put a lower bound of 60 seconds in case users + # accidentally set it to something unreasonable. + # For example, if they thought it was in minutes + # rather than seconds + if timeout < 60: + self.log.info(f'Found default timeout set to {timeout}. Instead trying minimum of 60.') + timeout = 60 + return self.event_loop.get_result(coro, timeout) + + @contextmanager + def async_timeout_handler(self, host: Optional[str] = '', + cmd: Optional[str] = '', + timeout: Optional[int] = None) -> Iterator[None]: + # this is meant to catch asyncio.TimeoutError and convert it into an + # OrchestratorError which much of the cephadm codebase is better equipped to handle.
+ # If the command being run, the host it is run on, or the timeout being used + # are provided, that will be included in the OrchestratorError's message + try: + yield + except asyncio.TimeoutError: + err_str: str = '' + if cmd: + err_str = f'Command "{cmd}" timed out ' + else: + err_str = 'Command timed out ' + if host: + err_str += f'on host {host} ' + if timeout: + err_str += f'(non-default {timeout} second timeout)' + else: + err_str += (f'(default {self.default_cephadm_command_timeout} second timeout)') + raise OrchestratorError(err_str) + + def set_container_image(self, entity: str, image: str) -> None: + self.check_mon_command({ + 'prefix': 'config set', + 'name': 'container_image', + 'value': image, + 'who': entity, + }) + + def config_notify(self) -> None: + """ + This method is called whenever one of our config options is changed. + + TODO: this method should be moved into mgr_module.py + """ + for opt in self.MODULE_OPTIONS: + setattr(self, + opt['name'], # type: ignore + self.get_module_option(opt['name'])) # type: ignore + self.log.debug(' mgr option %s = %s', + opt['name'], getattr(self, opt['name'])) # type: ignore + for opt in self.NATIVE_OPTIONS: + setattr(self, + opt, # type: ignore + self.get_ceph_option(opt)) + self.log.debug(' native option %s = %s', opt, getattr(self, opt)) # type: ignore + + self.event.set() + + def notify(self, notify_type: NotifyType, notify_id: Optional[str]) -> None: + if notify_type == NotifyType.mon_map: + # get monmap mtime so we can refresh configs when mons change + monmap = self.get('mon_map') + self.last_monmap = str_to_datetime(monmap['modified']) + if self.last_monmap and self.last_monmap > datetime_now(): + self.last_monmap = None # just in case clocks are skewed + if getattr(self, 'manage_etc_ceph_ceph_conf', False): + # getattr, due to notify() being called before config_notify() + self._kick_serve_loop() + if notify_type == NotifyType.pg_summary: + self._trigger_osd_removal() + + def _trigger_osd_removal(self) -> None: + remove_queue = self.to_remove_osds.as_osd_ids() + if not remove_queue: + return + data = self.get("osd_stats") + for osd in data.get('osd_stats', []): + if osd.get('num_pgs') == 0: + # if _ANY_ osd that is currently in the queue appears to be empty, + # start the removal process + if int(osd.get('osd')) in remove_queue: + self.log.debug('Found empty osd. 
Starting removal process') + # if the osd that is now empty is also part of the removal queue + # start the process + self._kick_serve_loop() + + def pause(self) -> None: + if not self.paused: + self.log.info('Paused') + self.set_store('pause', 'true') + self.paused = True + # wake loop so we update the health status + self._kick_serve_loop() + + def resume(self) -> None: + if self.paused: + self.log.info('Resumed') + self.paused = False + self.set_store('pause', None) + # unconditionally wake loop so that 'orch resume' can be used to kick + # cephadm + self._kick_serve_loop() + + def get_unique_name( + self, + daemon_type: str, + host: str, + existing: List[orchestrator.DaemonDescription], + prefix: Optional[str] = None, + forcename: Optional[str] = None, + rank: Optional[int] = None, + rank_generation: Optional[int] = None, + ) -> str: + """ + Generate a unique random service name + """ + suffix = daemon_type not in [ + 'mon', 'crash', 'ceph-exporter', + 'prometheus', 'node-exporter', 'grafana', 'alertmanager', + 'container', 'agent', 'snmp-gateway', 'loki', 'promtail', + 'elasticsearch', 'jaeger-collector', 'jaeger-agent', 'jaeger-query' + ] + if forcename: + if len([d for d in existing if d.daemon_id == forcename]): + raise orchestrator.OrchestratorValidationError( + f'name {daemon_type}.{forcename} already in use') + return forcename + + if '.' in host: + host = host.split('.')[0] + while True: + if prefix: + name = prefix + '.' + else: + name = '' + if rank is not None and rank_generation is not None: + name += f'{rank}.{rank_generation}.' + name += host + if suffix: + name += '.' + ''.join(random.choice(string.ascii_lowercase) + for _ in range(6)) + if len([d for d in existing if d.daemon_id == name]): + if not suffix: + raise orchestrator.OrchestratorValidationError( + f'name {daemon_type}.{name} already in use') + self.log.debug('name %s exists, trying again', name) + continue + return name + + def validate_ssh_config_content(self, ssh_config: Optional[str]) -> None: + if ssh_config is None or len(ssh_config.strip()) == 0: + raise OrchestratorValidationError('ssh_config cannot be empty') + # StrictHostKeyChecking is [yes|no] ? + res = re.findall(r'StrictHostKeyChecking\s+.*', ssh_config) + if not res: + raise OrchestratorValidationError('ssh_config requires StrictHostKeyChecking') + for s in res: + if 'ask' in s.lower(): + raise OrchestratorValidationError(f'ssh_config cannot contain: \'{s}\'') + + def validate_ssh_config_fname(self, ssh_config_fname: str) -> None: + if not os.path.isfile(ssh_config_fname): + raise OrchestratorValidationError("ssh_config \"{}\" does not exist".format( + ssh_config_fname)) + + def _process_ls_output(self, host: str, ls: List[Dict[str, Any]]) -> None: + def _as_datetime(value: Optional[str]) -> Optional[datetime.datetime]: + return str_to_datetime(value) if value is not None else None + + dm = {} + for d in ls: + if not d['style'].startswith('cephadm'): + continue + if d['fsid'] != self._cluster_fsid: + continue + if '.' 
not in d['name']: + continue + daemon_type = d['name'].split('.')[0] + if daemon_type not in orchestrator.KNOWN_DAEMON_TYPES: + logger.warning(f"Found unknown daemon type {daemon_type} on host {host}") + continue + + container_id = d.get('container_id') + if container_id: + # shorten the hash + container_id = container_id[0:12] + rank = int(d['rank']) if d.get('rank') is not None else None + rank_generation = int(d['rank_generation']) if d.get( + 'rank_generation') is not None else None + status, status_desc = None, 'unknown' + if 'state' in d: + status_desc = d['state'] + status = { + 'running': DaemonDescriptionStatus.running, + 'stopped': DaemonDescriptionStatus.stopped, + 'error': DaemonDescriptionStatus.error, + 'unknown': DaemonDescriptionStatus.error, + }[d['state']] + sd = orchestrator.DaemonDescription( + daemon_type=daemon_type, + daemon_id='.'.join(d['name'].split('.')[1:]), + hostname=host, + container_id=container_id, + container_image_id=d.get('container_image_id'), + container_image_name=d.get('container_image_name'), + container_image_digests=d.get('container_image_digests'), + version=d.get('version'), + status=status, + status_desc=status_desc, + created=_as_datetime(d.get('created')), + started=_as_datetime(d.get('started')), + last_refresh=datetime_now(), + last_configured=_as_datetime(d.get('last_configured')), + last_deployed=_as_datetime(d.get('last_deployed')), + memory_usage=d.get('memory_usage'), + memory_request=d.get('memory_request'), + memory_limit=d.get('memory_limit'), + cpu_percentage=d.get('cpu_percentage'), + service_name=d.get('service_name'), + ports=d.get('ports'), + ip=d.get('ip'), + deployed_by=d.get('deployed_by'), + rank=rank, + rank_generation=rank_generation, + extra_container_args=d.get('extra_container_args'), + extra_entrypoint_args=d.get('extra_entrypoint_args'), + ) + dm[sd.name()] = sd + self.log.debug('Refreshed host %s daemons (%d)' % (host, len(dm))) + self.cache.update_host_daemons(host, dm) + self.cache.save_host(host) + return None + + def update_watched_hosts(self) -> None: + # currently, we are watching hosts with nfs daemons + hosts_to_watch = [d.hostname for d in self.cache.get_daemons( + ) if d.daemon_type in RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES] + self.offline_watcher.set_hosts(list(set([h for h in hosts_to_watch if h is not None]))) + + def offline_hosts_remove(self, host: str) -> None: + if host in self.offline_hosts: + self.offline_hosts.remove(host) + + def update_failed_daemon_health_check(self) -> None: + failed_daemons = [] + for dd in self.cache.get_error_daemons(): + if dd.daemon_type != 'agent': # agents tracked by CEPHADM_AGENT_DOWN + failed_daemons.append('daemon %s on %s is in %s state' % ( + dd.name(), dd.hostname, dd.status_desc + )) + self.remove_health_warning('CEPHADM_FAILED_DAEMON') + if failed_daemons: + self.set_health_warning('CEPHADM_FAILED_DAEMON', f'{len(failed_daemons)} failed cephadm daemon(s)', len( + failed_daemons), failed_daemons) + + @staticmethod + def can_run() -> Tuple[bool, str]: + if asyncssh is not None: + return True, "" + else: + return False, "loading asyncssh library:{}".format( + asyncssh_import_error) + + def available(self) -> Tuple[bool, str, Dict[str, Any]]: + """ + The cephadm orchestrator is always available. + """ + ok, err = self.can_run() + if not ok: + return ok, err, {} + if not self.ssh_key or not self.ssh_pub: + return False, 'SSH keys not set. 
Use `ceph cephadm set-priv-key` and `ceph cephadm set-pub-key` or `ceph cephadm generate-key`', {} + + # mypy is unable to determine type for _processes since it's private + worker_count: int = self._worker_pool._processes # type: ignore + ret = { + "workers": worker_count, + "paused": self.paused, + } + + return True, err, ret + + def _validate_and_set_ssh_val(self, what: str, new: Optional[str], old: Optional[str]) -> None: + self.set_store(what, new) + self.ssh._reconfig_ssh() + if self.cache.get_hosts(): + # Can't check anything without hosts + host = self.cache.get_hosts()[0] + r = CephadmServe(self)._check_host(host) + if r is not None: + # connection failed; reset the user + self.set_store(what, old) + self.ssh._reconfig_ssh() + raise OrchestratorError('ssh connection %s@%s failed' % (self.ssh_user, host)) + self.log.info(f'Set ssh {what}') + + @orchestrator._cli_write_command( + prefix='cephadm set-ssh-config') + def _set_ssh_config(self, inbuf: Optional[str] = None) -> Tuple[int, str, str]: + """ + Set the ssh_config file (use -i <ssh_config>) + """ + # Set an ssh_config file provided from stdin + + old = self.ssh_config + if inbuf == old: + return 0, "value unchanged", "" + self.validate_ssh_config_content(inbuf) + self._validate_and_set_ssh_val('ssh_config', inbuf, old) + return 0, "", "" + + @orchestrator._cli_write_command('cephadm clear-ssh-config') + def _clear_ssh_config(self) -> Tuple[int, str, str]: + """ + Clear the ssh_config file + """ + # Clear the ssh_config file provided from stdin + self.set_store("ssh_config", None) + self.ssh_config_tmp = None + self.log.info('Cleared ssh_config') + self.ssh._reconfig_ssh() + return 0, "", "" + + @orchestrator._cli_read_command('cephadm get-ssh-config') + def _get_ssh_config(self) -> HandleCommandResult: + """ + Returns the ssh config as used by cephadm + """ + if self.ssh_config_file: + self.validate_ssh_config_fname(self.ssh_config_file) + with open(self.ssh_config_file) as f: + return HandleCommandResult(stdout=f.read()) + ssh_config = self.get_store("ssh_config") + if ssh_config: + return HandleCommandResult(stdout=ssh_config) + return HandleCommandResult(stdout=DEFAULT_SSH_CONFIG) + + @orchestrator._cli_write_command('cephadm generate-key') + def _generate_key(self) -> Tuple[int, str, str]: + """ + Generate a cluster SSH key (if not present) + """ + if not self.ssh_pub or not self.ssh_key: + self.log.info('Generating ssh key...') + tmp_dir = TemporaryDirectory() + path = tmp_dir.name + '/key' + try: + subprocess.check_call([ + '/usr/bin/ssh-keygen', + '-C', 'ceph-%s' % self._cluster_fsid, + '-N', '', + '-f', path + ]) + with open(path, 'r') as f: + secret = f.read() + with open(path + '.pub', 'r') as f: + pub = f.read() + finally: + os.unlink(path) + os.unlink(path + '.pub') + tmp_dir.cleanup() + self.set_store('ssh_identity_key', secret) + self.set_store('ssh_identity_pub', pub) + self.ssh._reconfig_ssh() + return 0, '', '' + + @orchestrator._cli_write_command( + 'cephadm set-priv-key') + def _set_priv_key(self, inbuf: Optional[str] = None) -> Tuple[int, str, str]: + """Set cluster SSH private key (use -i <key>)""" + if inbuf is None or len(inbuf) == 0: + return -errno.EINVAL, "", "empty private ssh key provided" + old = self.ssh_key + if inbuf == old: + return 0, "value unchanged", "" + self._validate_and_set_ssh_val('ssh_identity_key', inbuf, old) + self.log.info('Set ssh private key') + return 0, "", "" + + @orchestrator._cli_write_command( + 'cephadm set-pub-key') + def _set_pub_key(self, inbuf: Optional[str] = None) -> Tuple[int, str, str]:
+ """Set cluster SSH public key (use -i )""" + if inbuf is None or len(inbuf) == 0: + return -errno.EINVAL, "", "empty public ssh key provided" + old = self.ssh_pub + if inbuf == old: + return 0, "value unchanged", "" + self._validate_and_set_ssh_val('ssh_identity_pub', inbuf, old) + return 0, "", "" + + @orchestrator._cli_write_command( + 'cephadm set-signed-cert') + def _set_signed_cert(self, inbuf: Optional[str] = None) -> Tuple[int, str, str]: + """Set a signed cert if CA signed keys are being used (use -i )""" + if inbuf is None or len(inbuf) == 0: + return -errno.EINVAL, "", "empty cert file provided" + old = self.ssh_cert + if inbuf == old: + return 0, "value unchanged", "" + self._validate_and_set_ssh_val('ssh_identity_cert', inbuf, old) + return 0, "", "" + + @orchestrator._cli_write_command( + 'cephadm clear-key') + def _clear_key(self) -> Tuple[int, str, str]: + """Clear cluster SSH key""" + self.set_store('ssh_identity_key', None) + self.set_store('ssh_identity_pub', None) + self.set_store('ssh_identity_cert', None) + self.ssh._reconfig_ssh() + self.log.info('Cleared cluster SSH key') + return 0, '', '' + + @orchestrator._cli_read_command( + 'cephadm get-pub-key') + def _get_pub_key(self) -> Tuple[int, str, str]: + """Show SSH public key for connecting to cluster hosts""" + if self.ssh_pub: + return 0, self.ssh_pub, '' + else: + return -errno.ENOENT, '', 'No cluster SSH key defined' + + @orchestrator._cli_read_command( + 'cephadm get-signed-cert') + def _get_signed_cert(self) -> Tuple[int, str, str]: + """Show SSH signed cert for connecting to cluster hosts using CA signed keys""" + if self.ssh_cert: + return 0, self.ssh_cert, '' + else: + return -errno.ENOENT, '', 'No signed cert defined' + + @orchestrator._cli_read_command( + 'cephadm get-user') + def _get_user(self) -> Tuple[int, str, str]: + """ + Show user for SSHing to cluster hosts + """ + if self.ssh_user is None: + return -errno.ENOENT, '', 'No cluster SSH user configured' + else: + return 0, self.ssh_user, '' + + @orchestrator._cli_read_command( + 'cephadm set-user') + def set_ssh_user(self, user: str) -> Tuple[int, str, str]: + """ + Set user for SSHing to cluster hosts, passwordless sudo will be needed for non-root users + """ + current_user = self.ssh_user + if user == current_user: + return 0, "value unchanged", "" + + self._validate_and_set_ssh_val('ssh_user', user, current_user) + current_ssh_config = self._get_ssh_config() + new_ssh_config = re.sub(r"(\s{2}User\s)(.*)", r"\1" + user, current_ssh_config.stdout) + self._set_ssh_config(new_ssh_config) + + msg = 'ssh user set to %s' % user + if user != 'root': + msg += '. sudo will be used' + self.log.info(msg) + return 0, msg, '' + + @orchestrator._cli_read_command( + 'cephadm registry-login') + def registry_login(self, url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, inbuf: Optional[str] = None) -> Tuple[int, str, str]: + """ + Set custom registry login info by providing url, username and password or json file with login info (-i ) + """ + # if password not given in command line, get it through file input + if not (url and username and password) and (inbuf is None or len(inbuf) == 0): + return -errno.EINVAL, "", ("Invalid arguments. 
Please provide arguments <url> <username> <password> " + "or -i <login credentials json file>") + elif (url and username and password): + registry_json = {'url': url, 'username': username, 'password': password} + else: + assert isinstance(inbuf, str) + registry_json = json.loads(inbuf) + if "url" not in registry_json or "username" not in registry_json or "password" not in registry_json: + return -errno.EINVAL, "", ("json provided for custom registry login did not include all necessary fields. " + "Please set up json file as\n" + "{\n" + " \"url\": \"REGISTRY_URL\",\n" + " \"username\": \"REGISTRY_USERNAME\",\n" + " \"password\": \"REGISTRY_PASSWORD\"\n" + "}\n") + + # verify login info works by attempting login on random host + host = None + for host_name in self.inventory.keys(): + host = host_name + break + if not host: + raise OrchestratorError('no hosts defined') + with self.async_timeout_handler(host, 'cephadm registry-login'): + r = self.wait_async(CephadmServe(self)._registry_login(host, registry_json)) + if r is not None: + return 1, '', r + # if logins succeeded, store info + self.log.debug("Host logins successful. Storing login info.") + self.set_store('registry_credentials', json.dumps(registry_json)) + # distribute new login info to all hosts + self.cache.distribute_new_registry_login_info() + return 0, "registry login scheduled", '' + + @orchestrator._cli_read_command('cephadm check-host') + def check_host(self, host: str, addr: Optional[str] = None) -> Tuple[int, str, str]: + """Check whether we can access and manage a remote host""" + try: + with self.async_timeout_handler(host, f'cephadm check-host --expect-hostname {host}'): + out, err, code = self.wait_async( + CephadmServe(self)._run_cephadm( + host, cephadmNoImage, 'check-host', ['--expect-hostname', host], + addr=addr, error_ok=True, no_fsid=True)) + if code: + return 1, '', ('check-host failed:\n' + '\n'.join(err)) + except ssh.HostConnectionError as e: + self.log.exception( + f"check-host failed for '{host}' at addr ({e.addr}) due to connection failure: {str(e)}") + return 1, '', ('check-host failed:\n' + + f"Failed to connect to {host} at address ({e.addr}): {str(e)}") + except OrchestratorError: + self.log.exception(f"check-host failed for '{host}'") + return 1, '', ('check-host failed:\n' + + f"Host '{host}' not found.
Use 'ceph orch host ls' to see all managed hosts.") + # if we have an outstanding health alert for this host, give the + # serve thread a kick + if 'CEPHADM_HOST_CHECK_FAILED' in self.health_checks: + for item in self.health_checks['CEPHADM_HOST_CHECK_FAILED']['detail']: + if item.startswith('host %s ' % host): + self.event.set() + return 0, '%s (%s) ok' % (host, addr), '\n'.join(err) + + @orchestrator._cli_read_command( + 'cephadm prepare-host') + def _prepare_host(self, host: str, addr: Optional[str] = None) -> Tuple[int, str, str]: + """Prepare a remote host for use with cephadm""" + with self.async_timeout_handler(host, 'cephadm prepare-host'): + out, err, code = self.wait_async( + CephadmServe(self)._run_cephadm( + host, cephadmNoImage, 'prepare-host', ['--expect-hostname', host], + addr=addr, error_ok=True, no_fsid=True)) + if code: + return 1, '', ('prepare-host failed:\n' + '\n'.join(err)) + # if we have an outstanding health alert for this host, give the + # serve thread a kick + if 'CEPHADM_HOST_CHECK_FAILED' in self.health_checks: + for item in self.health_checks['CEPHADM_HOST_CHECK_FAILED']['detail']: + if item.startswith('host %s ' % host): + self.event.set() + return 0, '%s (%s) ok' % (host, addr), '\n'.join(err) + + @orchestrator._cli_write_command( + prefix='cephadm set-extra-ceph-conf') + def _set_extra_ceph_conf(self, inbuf: Optional[str] = None) -> HandleCommandResult: + """ + Text that is appended to every daemon's ceph.conf. + Mainly a workaround, until `config generate-minimal-conf` generates + a complete ceph.conf. + + Warning: this is a dangerous operation. + """ + if inbuf: + # sanity check. + cp = ConfigParser() + cp.read_string(inbuf, source='<infile>') + + self.set_store("extra_ceph_conf", json.dumps({ + 'conf': inbuf, + 'last_modified': datetime_to_str(datetime_now()) + })) + self.log.info('Set extra_ceph_conf') + self._kick_serve_loop() + return HandleCommandResult() + + @orchestrator._cli_read_command( + 'cephadm get-extra-ceph-conf') + def _get_extra_ceph_conf(self) -> HandleCommandResult: + """ + Get extra ceph conf that is appended + """ + return HandleCommandResult(stdout=self.extra_ceph_conf().conf) + + @orchestrator._cli_read_command('cephadm config-check ls') + def _config_checks_list(self, format: Format = Format.plain) -> HandleCommandResult: + """List the available configuration checks and their current state""" + + if format not in [Format.plain, Format.json, Format.json_pretty]: + return HandleCommandResult( + retval=1, + stderr="Requested format is not supported when listing configuration checks" + ) + + if format in [Format.json, Format.json_pretty]: + return HandleCommandResult( + stdout=to_format(self.config_checker.health_checks, + format, + many=True, + cls=None)) + + # plain formatting + table = PrettyTable( + ['NAME', + 'HEALTHCHECK', + 'STATUS', + 'DESCRIPTION' + ], border=False) + table.align['NAME'] = 'l' + table.align['HEALTHCHECK'] = 'l' + table.align['STATUS'] = 'l' + table.align['DESCRIPTION'] = 'l' + table.left_padding_width = 0 + table.right_padding_width = 2 + for c in self.config_checker.health_checks: + table.add_row(( + c.name, + c.healthcheck_name, + c.status, + c.description, + )) + + return HandleCommandResult(stdout=table.get_string()) + + @orchestrator._cli_read_command('cephadm config-check status') + def _config_check_status(self) -> HandleCommandResult: + """Show whether the configuration checker feature is enabled/disabled""" + status = self.get_module_option('config_checks_enabled') + return HandleCommandResult(stdout="Enabled"
if status else "Disabled") + + @orchestrator._cli_write_command('cephadm config-check enable') + def _config_check_enable(self, check_name: str) -> HandleCommandResult: + """Enable a specific configuration check""" + if not self._config_check_valid(check_name): + return HandleCommandResult(retval=1, stderr="Invalid check name") + + err, msg = self._update_config_check(check_name, 'enabled') + if err: + return HandleCommandResult( + retval=err, + stderr=f"Failed to enable check '{check_name}' : {msg}") + + return HandleCommandResult(stdout="ok") + + @orchestrator._cli_write_command('cephadm config-check disable') + def _config_check_disable(self, check_name: str) -> HandleCommandResult: + """Disable a specific configuration check""" + if not self._config_check_valid(check_name): + return HandleCommandResult(retval=1, stderr="Invalid check name") + + err, msg = self._update_config_check(check_name, 'disabled') + if err: + return HandleCommandResult(retval=err, stderr=f"Failed to disable check '{check_name}': {msg}") + else: + # drop any outstanding raised healthcheck for this check + config_check = self.config_checker.lookup_check(check_name) + if config_check: + if config_check.healthcheck_name in self.health_checks: + self.health_checks.pop(config_check.healthcheck_name, None) + self.set_health_checks(self.health_checks) + else: + self.log.error( + f"Unable to resolve a check name ({check_name}) to a healthcheck definition?") + + return HandleCommandResult(stdout="ok") + + def _config_check_valid(self, check_name: str) -> bool: + return check_name in [chk.name for chk in self.config_checker.health_checks] + + def _update_config_check(self, check_name: str, status: str) -> Tuple[int, str]: + checks_raw = self.get_store('config_checks') + if not checks_raw: + return 1, "config_checks setting is not available" + + checks = json.loads(checks_raw) + checks.update({ + check_name: status + }) + self.log.info(f"updated config check '{check_name}' : {status}") + self.set_store('config_checks', json.dumps(checks)) + return 0, "" + + class ExtraCephConf(NamedTuple): + conf: str + last_modified: Optional[datetime.datetime] + + def extra_ceph_conf(self) -> 'CephadmOrchestrator.ExtraCephConf': + data = self.get_store('extra_ceph_conf') + if not data: + return CephadmOrchestrator.ExtraCephConf('', None) + try: + j = json.loads(data) + except ValueError: + msg = 'Unable to load extra_ceph_conf: Cannot decode JSON' + self.log.exception('%s: \'%s\'', msg, data) + return CephadmOrchestrator.ExtraCephConf('', None) + return CephadmOrchestrator.ExtraCephConf(j['conf'], str_to_datetime(j['last_modified'])) + + def extra_ceph_conf_is_newer(self, dt: datetime.datetime) -> bool: + conf = self.extra_ceph_conf() + if not conf.last_modified: + return False + return conf.last_modified > dt + + @orchestrator._cli_write_command( + 'cephadm osd activate' + ) + def _osd_activate(self, host: List[str]) -> HandleCommandResult: + """ + Start OSD containers for existing OSDs + """ + + @forall_hosts + def run(h: str) -> str: + with self.async_timeout_handler(h, 'cephadm deploy (osd daemon)'): + return self.wait_async(self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')) + + return HandleCommandResult(stdout='\n'.join(run(host))) + + @orchestrator._cli_read_command('orch client-keyring ls') + def _client_keyring_ls(self, format: Format = Format.plain) -> HandleCommandResult: + """ + List client keyrings under cephadm management + """ + if format != Format.plain: + output = to_format(self.keys.keys.values(), format, 
many=True, cls=ClientKeyringSpec) + else: + table = PrettyTable( + ['ENTITY', 'PLACEMENT', 'MODE', 'OWNER', 'PATH'], + border=False) + table.align = 'l' + table.left_padding_width = 0 + table.right_padding_width = 2 + for ks in sorted(self.keys.keys.values(), key=lambda ks: ks.entity): + table.add_row(( + ks.entity, ks.placement.pretty_str(), + utils.file_mode_to_str(ks.mode), + f'{ks.uid}:{ks.gid}', + ks.path, + )) + output = table.get_string() + return HandleCommandResult(stdout=output) + + @orchestrator._cli_write_command('orch client-keyring set') + def _client_keyring_set( + self, + entity: str, + placement: str, + owner: Optional[str] = None, + mode: Optional[str] = None, + ) -> HandleCommandResult: + """ + Add or update client keyring under cephadm management + """ + if not entity.startswith('client.'): + raise OrchestratorError('entity must start with client.') + if owner: + try: + uid, gid = map(int, owner.split(':')) + except Exception: + raise OrchestratorError('owner must look like "<uid>:<gid>", e.g., "0:0"') + else: + uid = 0 + gid = 0 + if mode: + try: + imode = int(mode, 8) + except Exception: + raise OrchestratorError('mode must be an octal mode, e.g. "600"') + else: + imode = 0o600 + pspec = PlacementSpec.from_string(placement) + ks = ClientKeyringSpec(entity, pspec, mode=imode, uid=uid, gid=gid) + self.keys.update(ks) + self._kick_serve_loop() + return HandleCommandResult() + + @orchestrator._cli_write_command('orch client-keyring rm') + def _client_keyring_rm( + self, + entity: str, + ) -> HandleCommandResult: + """ + Remove client keyring from cephadm management + """ + self.keys.rm(entity) + self._kick_serve_loop() + return HandleCommandResult() + + def _get_container_image(self, daemon_name: str) -> Optional[str]: + daemon_type = daemon_name.split('.', 1)[0] # type: ignore + image: Optional[str] = None + if daemon_type in CEPH_IMAGE_TYPES: + # get container image + image = str(self.get_foreign_ceph_option( + utils.name_to_config_section(daemon_name), + 'container_image' + )).strip() + elif daemon_type == 'prometheus': + image = self.container_image_prometheus + elif daemon_type == 'nvmeof': + image = self.container_image_nvmeof + elif daemon_type == 'grafana': + image = self.container_image_grafana + elif daemon_type == 'alertmanager': + image = self.container_image_alertmanager + elif daemon_type == 'node-exporter': + image = self.container_image_node_exporter + elif daemon_type == 'loki': + image = self.container_image_loki + elif daemon_type == 'promtail': + image = self.container_image_promtail + elif daemon_type == 'haproxy': + image = self.container_image_haproxy + elif daemon_type == 'keepalived': + image = self.container_image_keepalived + elif daemon_type == 'elasticsearch': + image = self.container_image_elasticsearch + elif daemon_type == 'jaeger-agent': + image = self.container_image_jaeger_agent + elif daemon_type == 'jaeger-collector': + image = self.container_image_jaeger_collector + elif daemon_type == 'jaeger-query': + image = self.container_image_jaeger_query + elif daemon_type == CustomContainerService.TYPE: + # The image can't be resolved, the necessary information + # is only available when a container is deployed (given + # via spec).
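+ # As an illustration (daemon names hypothetical): a daemon named + # 'rgw.foo.host1.abcdef' resolves its image from the 'container_image' + # value of its own config section via get_foreign_ceph_option(), while + # 'prometheus.host1' falls back to the module's + # container_image_prometheus option.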
+ image = None + elif daemon_type == 'snmp-gateway': + image = self.container_image_snmp_gateway + else: + assert False, daemon_type + + self.log.debug('%s container image %s' % (daemon_name, image)) + + return image + + def _check_valid_addr(self, host: str, addr: str) -> str: + # make sure hostname is resolvable before trying to make a connection + try: + ip_addr = utils.resolve_ip(addr) + except OrchestratorError as e: + msg = str(e) + f''' +You may need to supply an address for {addr} + +Please make sure that the host is reachable and accepts connections using the cephadm SSH key +To add the cephadm SSH key to the host: +> ceph cephadm get-pub-key > ~/ceph.pub +> ssh-copy-id -f -i ~/ceph.pub {self.ssh_user}@{addr} + +To check that the host is reachable open a new shell with the --no-hosts flag: +> cephadm shell --no-hosts + +Then run the following: +> ceph cephadm get-ssh-config > ssh_config +> ceph config-key get mgr/cephadm/ssh_identity_key > ~/cephadm_private_key +> chmod 0600 ~/cephadm_private_key +> ssh -F ssh_config -i ~/cephadm_private_key {self.ssh_user}@{addr}''' + raise OrchestratorError(msg) + + if ipaddress.ip_address(ip_addr).is_loopback and host == addr: + # if this is a re-add, use old address. otherwise error + if host not in self.inventory or self.inventory.get_addr(host) == host: + raise OrchestratorError( + (f'Cannot automatically resolve ip address of host {host}. Ip resolved to loopback address: {ip_addr}\n' + + f'Please explicitly provide the address (ceph orch host add {host} --addr <ip>)')) + self.log.debug( + f'Received loopback address resolving ip for {host}: {ip_addr}. Falling back to previous address.') + ip_addr = self.inventory.get_addr(host) + try: + with self.async_timeout_handler(host, f'cephadm check-host --expect-hostname {host}'): + out, err, code = self.wait_async(CephadmServe(self)._run_cephadm( + host, cephadmNoImage, 'check-host', + ['--expect-hostname', host], + addr=addr, + error_ok=True, no_fsid=True)) + if code: + msg = 'check-host failed:\n' + '\n'.join(err) + # err will contain stdout and stderr, so we filter on the message text to + # only show the errors + errors = [_i.replace("ERROR: ", "") for _i in err if _i.startswith('ERROR')] + if errors: + msg = f'Host {host} ({addr}) failed check(s): {errors}' + raise OrchestratorError(msg) + except ssh.HostConnectionError as e: + raise OrchestratorError(str(e)) + return ip_addr + + def _add_host(self, spec): + # type: (HostSpec) -> str + """ + Add a host to be managed by the orchestrator. + + :param host: host name + """ + HostSpec.validate(spec) + ip_addr = self._check_valid_addr(spec.hostname, spec.addr) + if spec.addr == spec.hostname and ip_addr: + spec.addr = ip_addr + + if spec.hostname in self.inventory and self.inventory.get_addr(spec.hostname) != spec.addr: + self.cache.refresh_all_host_info(spec.hostname) + + # prime crush map?
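+ # For example (location values hypothetical), a HostSpec with + # location={'rack': 'r1'} primes the crush map below with roughly: + # + # ceph osd crush add-bucket <hostname> host rack=r1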
+ if spec.location: + self.check_mon_command({ + 'prefix': 'osd crush add-bucket', + 'name': spec.hostname, + 'type': 'host', + 'args': [f'{k}={v}' for k, v in spec.location.items()], + }) + + if spec.hostname not in self.inventory: + self.cache.prime_empty_host(spec.hostname) + self.inventory.add_host(spec) + self.offline_hosts_remove(spec.hostname) + if spec.status == 'maintenance': + self._set_maintenance_healthcheck() + self.event.set()  # refresh stray health check + self.log.info('Added host %s' % spec.hostname) + return "Added host '{}' with addr '{}'".format(spec.hostname, spec.addr) + + @handle_orch_error + def add_host(self, spec: HostSpec) -> str: + return self._add_host(spec) + + @handle_orch_error + def remove_host(self, host: str, force: bool = False, offline: bool = False) -> str: + """ + Remove a host from orchestrator management. + + :param host: host name + :param force: bypass running daemons check + :param offline: remove offline host + """ + + # check if host is offline + host_offline = host in self.offline_hosts + + if host_offline and not offline: + raise OrchestratorValidationError( + "{} is offline, please use --offline and --force to remove this host. This can potentially cause data loss".format(host)) + + if not host_offline and offline: + raise OrchestratorValidationError( + "{} is online, please remove host without --offline.".format(host)) + + if offline and not force: + raise OrchestratorValidationError("Removing an offline host requires --force") + + # check if there are daemons on the host + if not force: + daemons = self.cache.get_daemons_by_host(host) + if daemons: + self.log.warning(f"Blocked {host} removal. Daemons running: {daemons}") + + daemons_table = "" + daemons_table += "{:<20} {:<15}\n".format("type", "id") + daemons_table += "{:<20} {:<15}\n".format("-" * 20, "-" * 15) + for d in daemons: + daemons_table += "{:<20} {:<15}\n".format(d.daemon_type, d.daemon_id) + + raise OrchestratorValidationError("Not allowed to remove %s from cluster. " + "The following daemons are running on the host:" + "\n%s\nPlease run 'ceph orch host drain %s' to remove daemons from host" % ( + host, daemons_table, host)) + + # check if we're removing the last _admin host + if not force: + p = PlacementSpec(label=SpecialHostLabels.ADMIN) + admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) + if len(admin_hosts) == 1 and admin_hosts[0] == host: + raise OrchestratorValidationError(f"Host {host} is the last host with the '{SpecialHostLabels.ADMIN}'" + f" label. Please add the '{SpecialHostLabels.ADMIN}' label to a host" + " or add --force to this command") + + def run_cmd(cmd_args: dict) -> None: + ret, out, err = self.mon_command(cmd_args) + if ret != 0: + self.log.debug(f"ran {cmd_args} with mon_command") + self.log.error( + f"cmd: {cmd_args.get('prefix')} failed with: {err}.
(errno:{ret})") + self.log.debug(f"cmd: {cmd_args.get('prefix')} returns: {out}") + + if offline: + daemons = self.cache.get_daemons_by_host(host) + for d in daemons: + self.log.info(f"removing: {d.name()}") + + if d.daemon_type != 'osd': + self.cephadm_services[daemon_type_to_service(str(d.daemon_type))].pre_remove(d) + self.cephadm_services[daemon_type_to_service( + str(d.daemon_type))].post_remove(d, is_failed_deploy=False) + else: + cmd_args = { + 'prefix': 'osd purge-actual', + 'id': int(str(d.daemon_id)), + 'yes_i_really_mean_it': True + } + run_cmd(cmd_args) + + cmd_args = { + 'prefix': 'osd crush rm', + 'name': host + } + run_cmd(cmd_args) + + self.inventory.rm_host(host) + self.cache.rm_host(host) + self.ssh.reset_con(host) + # if host was in offline host list, we should remove it now. + self.offline_hosts_remove(host) + self.event.set()  # refresh stray health check + self.log.info('Removed host %s' % host) + return "Removed {} host '{}'".format('offline' if offline else '', host) + + @handle_orch_error + def update_host_addr(self, host: str, addr: str) -> str: + self._check_valid_addr(host, addr) + self.inventory.set_addr(host, addr) + self.ssh.reset_con(host) + self.event.set()  # refresh stray health check + self.log.info('Set host %s addr to %s' % (host, addr)) + return "Updated host '{}' addr to '{}'".format(host, addr) + + @handle_orch_error + def get_hosts(self): + # type: () -> List[orchestrator.HostSpec] + """ + Return a list of hosts managed by the orchestrator. + + Notes: + - skip async: manager reads from cache. + """ + return list(self.inventory.all_specs()) + + @handle_orch_error + def get_facts(self, hostname: Optional[str] = None) -> List[Dict[str, Any]]: + """ + Return a list of host metadata (gather_facts) managed by the orchestrator. + + Notes: + - skip async: manager reads from cache. + """ + if hostname: + return [self.cache.get_facts(hostname)] + + return [self.cache.get_facts(hostname) for hostname in self.cache.get_hosts()] + + @handle_orch_error + def add_host_label(self, host: str, label: str) -> str: + self.inventory.add_label(host, label) + self.log.info('Added label %s to host %s' % (label, host)) + self._kick_serve_loop() + return 'Added label %s to host %s' % (label, host) + + @handle_orch_error + def remove_host_label(self, host: str, label: str, force: bool = False) -> str: + # if we remove the _admin label from the only host that has it we could end up + # removing the only instance of the config and keyring and cause issues + if not force and label == SpecialHostLabels.ADMIN: + p = PlacementSpec(label=SpecialHostLabels.ADMIN) + admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) + if len(admin_hosts) == 1 and admin_hosts[0] == host: + raise OrchestratorValidationError(f"Host {host} is the last host with the '{SpecialHostLabels.ADMIN}'" + f" label.\nRemoving the {SpecialHostLabels.ADMIN} label from this host could cause the removal" + " of the last cluster config/keyring managed by cephadm.\n" + f"It is recommended to add the {SpecialHostLabels.ADMIN} label to another host" + " before completing this operation.\nIf you're certain this is" + " what you want rerun this command with --force.") + if self.inventory.has_label(host, label): + self.inventory.rm_label(host, label) + msg = f'Removed label {label} from host {host}' + else: + msg = f"Host {host} does not have label '{label}'. Please use 'ceph orch host ls' to list all the labels."
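+ # Usage sketch (host name hypothetical): removing the last '_admin' + # label is refused unless forced: + # + # ceph orch host label rm host1 _admin # may be refused + # ceph orch host label rm host1 _admin --force # bypasses the guard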
+ self.log.info(msg) + self._kick_serve_loop() + return msg + + def _host_ok_to_stop(self, hostname: str, force: bool = False) -> Tuple[int, str]: + self.log.debug("running host-ok-to-stop checks") + daemons = self.cache.get_daemons() + daemon_map: Dict[str, List[str]] = defaultdict(lambda: []) + for dd in daemons: + assert dd.hostname is not None + assert dd.daemon_type is not None + assert dd.daemon_id is not None + if dd.hostname == hostname: + daemon_map[dd.daemon_type].append(dd.daemon_id) + + notifications: List[str] = [] + error_notifications: List[str] = [] + okay: bool = True + for daemon_type, daemon_ids in daemon_map.items(): + r = self.cephadm_services[daemon_type_to_service( + daemon_type)].ok_to_stop(daemon_ids, force=force) + if r.retval: + okay = False + # collect error notifications so user can see every daemon causing host + # to not be okay to stop + error_notifications.append(r.stderr) + if r.stdout: + # if extra notifications to print for user, add them to notifications list + notifications.append(r.stdout) + + if not okay: + # at least one daemon is not okay to stop + return 1, '\n'.join(error_notifications) + + if notifications: + return 0, (f'It is presumed safe to stop host {hostname}. ' + + 'Note the following:\n\n' + '\n'.join(notifications)) + return 0, f'It is presumed safe to stop host {hostname}' + + @handle_orch_error + def host_ok_to_stop(self, hostname: str) -> str: + if hostname not in self.cache.get_hosts(): + raise OrchestratorError(f'Cannot find host "{hostname}"') + + rc, msg = self._host_ok_to_stop(hostname) + if rc: + raise OrchestratorError(msg, errno=rc) + + self.log.info(msg) + return msg + + def _set_maintenance_healthcheck(self) -> None: + """Raise/update or clear the maintenance health check as needed""" + + in_maintenance = self.inventory.get_host_with_state("maintenance") + if not in_maintenance: + self.remove_health_warning('HOST_IN_MAINTENANCE') + else: + s = "host is" if len(in_maintenance) == 1 else "hosts are" + self.set_health_warning("HOST_IN_MAINTENANCE", f"{len(in_maintenance)} {s} in maintenance mode", 1, [ + f"{h} is in maintenance" for h in in_maintenance]) + + @handle_orch_error + @host_exists() + def enter_host_maintenance(self, hostname: str, force: bool = False, yes_i_really_mean_it: bool = False) -> str: + """ Attempt to place a cluster host in maintenance + + Placing a host into maintenance disables the cluster's ceph target in systemd + and stops all ceph daemons. If the host is an osd host we apply the noout flag + for the host subtree in crush to prevent data movement during a host maintenance + window. 
+ + :param hostname: (str) name of the host (must match an inventory hostname) + + :raises OrchestratorError: Hostname is invalid, host is already in maintenance + """ + if yes_i_really_mean_it and not force: + raise OrchestratorError("--force must be passed with --yes-i-really-mean-it") + + if len(self.cache.get_hosts()) == 1 and not yes_i_really_mean_it: + raise OrchestratorError("Maintenance feature is not supported on single node clusters") + + # if upgrade is active, deny + if self.upgrade.upgrade_state and not yes_i_really_mean_it: + raise OrchestratorError( + f"Unable to place {hostname} in maintenance with upgrade active/paused") + + tgt_host = self.inventory._inventory[hostname] + if tgt_host.get("status", "").lower() == "maintenance": + raise OrchestratorError(f"Host {hostname} is already in maintenance") + + host_daemons = self.cache.get_daemon_types(hostname) + self.log.debug("daemons on host {}".format(','.join(host_daemons))) + if host_daemons: + # daemons on this host, so check the daemons can be stopped + # and if so, place the host into maintenance by disabling the target + rc, msg = self._host_ok_to_stop(hostname, force) + if rc and not yes_i_really_mean_it: + raise OrchestratorError( + msg + '\nNote: Warnings can be bypassed with the --force flag', errno=rc) + + # call the host-maintenance function + with self.async_timeout_handler(hostname, 'cephadm host-maintenance enter'): + _out, _err, _code = self.wait_async( + CephadmServe(self)._run_cephadm( + hostname, cephadmNoImage, "host-maintenance", + ["enter"], + error_ok=True)) + returned_msg = _err[0].split('\n')[-1] + if (returned_msg.startswith('failed') or returned_msg.startswith('ERROR')) and not yes_i_really_mean_it: + raise OrchestratorError( + f"Failed to place {hostname} into maintenance for cluster {self._cluster_fsid}") + + if "osd" in host_daemons: + crush_node = hostname if '.' not in hostname else hostname.split('.')[0] + rc, out, err = self.mon_command({ + 'prefix': 'osd set-group', + 'flags': 'noout', + 'who': [crush_node], + 'format': 'json' + }) + if rc and not yes_i_really_mean_it: + self.log.warning( + f"maintenance mode request for {hostname} failed to SET the noout group (rc={rc})") + raise OrchestratorError( + f"Unable to set the osds on {hostname} to noout (rc={rc})") + elif not rc: + self.log.info( + f"maintenance mode request for {hostname} has SET the noout group") + + # update the host status in the inventory + tgt_host["status"] = "maintenance" + self.inventory._inventory[hostname] = tgt_host + self.inventory.save() + + self._set_maintenance_healthcheck() + return f'Daemons for Ceph cluster {self._cluster_fsid} stopped on host {hostname}. 
Host {hostname} moved to maintenance mode' + + @handle_orch_error + @host_exists() + def exit_host_maintenance(self, hostname: str) -> str: + """Exit maintenance mode and return a host to an operational state + + Returning from maintenance will enable the cluster's systemd target and + start it, and remove any noout that has been added for the host if the + host has osd daemons + + :param hostname: (str) host name + + :raises OrchestratorError: Unable to return from maintenance, or unset the + noout flag + """ + tgt_host = self.inventory._inventory[hostname] + if tgt_host['status'] != "maintenance": + raise OrchestratorError(f"Host {hostname} is not in maintenance mode") + + with self.async_timeout_handler(hostname, 'cephadm host-maintenance exit'): + outs, errs, _code = self.wait_async( + CephadmServe(self)._run_cephadm(hostname, cephadmNoImage, + 'host-maintenance', ['exit'], error_ok=True)) + returned_msg = errs[0].split('\n')[-1] + if returned_msg.startswith('failed') or returned_msg.startswith('ERROR'): + raise OrchestratorError( + f"Failed to exit maintenance state for host {hostname}, cluster {self._cluster_fsid}") + + if "osd" in self.cache.get_daemon_types(hostname): + crush_node = hostname if '.' not in hostname else hostname.split('.')[0] + rc, _out, _err = self.mon_command({ + 'prefix': 'osd unset-group', + 'flags': 'noout', + 'who': [crush_node], + 'format': 'json' + }) + if rc: + self.log.warning( + f"exit maintenance request failed to UNSET the noout group for {hostname}, (rc={rc})") + raise OrchestratorError(f"Unable to unset noout for the osds on {hostname} (rc={rc})") + else: + self.log.info( + f"exit maintenance request has UNSET the noout group on host {hostname}") + + # update the host record status + tgt_host['status'] = "" + self.inventory._inventory[hostname] = tgt_host + self.inventory.save() + + self._set_maintenance_healthcheck() + + return f"Ceph cluster {self._cluster_fsid} on {hostname} has exited maintenance mode" + + @handle_orch_error + @host_exists() + def rescan_host(self, hostname: str) -> str: + """Use cephadm to issue a disk rescan on each HBA + + Some HBAs and external enclosures don't automatically register + device insertion with the kernel, so for these scenarios we need + to manually rescan + + :param hostname: (str) host name + """ + self.log.info(f'disk rescan request sent to host "{hostname}"') + with self.async_timeout_handler(hostname, 'cephadm disk-rescan'): + _out, _err, _code = self.wait_async( + CephadmServe(self)._run_cephadm(hostname, cephadmNoImage, "disk-rescan", + [], no_fsid=True, error_ok=True)) + if not _err: + raise OrchestratorError('Unexpected response from cephadm disk-rescan call') + + msg = _err[0].split('\n')[-1] + log_msg = f'disk rescan: {msg}' + if msg.upper().startswith('OK'): + self.log.info(log_msg) + else: + self.log.warning(log_msg) + + return f'{msg}' + + def get_minimal_ceph_conf(self) -> str: + _, config, _ = self.check_mon_command({ + "prefix": "config generate-minimal-conf", + }) + extra = self.extra_ceph_conf().conf + if extra: + try: + config = self._combine_confs(config, extra) + except Exception as e: + self.log.error(f'Failed to add extra ceph conf settings to minimal ceph conf: {e}') + return config + + def _combine_confs(self, conf1: str, conf2: str) -> str: + section_to_option: Dict[str, List[str]] = {} + final_conf: str = '' + for conf in [conf1, conf2]: + if not conf: + continue + section = '' + for line in conf.split('\n'): + if line.strip().startswith('#') or not line.strip(): + continue + if
line.strip().startswith('[') and line.strip().endswith(']'): + section = line.strip().replace('[', '').replace(']', '') + if section not in section_to_option: + section_to_option[section] = [] + else: + section_to_option[section].append(line.strip()) + + first_section = True + for section, options in section_to_option.items(): + if not first_section: + final_conf += '\n' + final_conf += f'[{section}]\n' + for option in options: + final_conf += f'{option}\n' + first_section = False + + return final_conf + + def _invalidate_daemons_and_kick_serve(self, filter_host: Optional[str] = None) -> None: + if filter_host: + self.cache.invalidate_host_daemons(filter_host) + else: + for h in self.cache.get_hosts(): + # Also discover daemons deployed manually + self.cache.invalidate_host_daemons(h) + + self._kick_serve_loop() + + @handle_orch_error + def describe_service(self, service_type: Optional[str] = None, service_name: Optional[str] = None, + refresh: bool = False) -> List[orchestrator.ServiceDescription]: + if refresh: + self._invalidate_daemons_and_kick_serve() + self.log.debug('Kicked serve() loop to refresh all services') + + sm: Dict[str, orchestrator.ServiceDescription] = {} + + # known services + for nm, spec in self.spec_store.all_specs.items(): + if service_type is not None and service_type != spec.service_type: + continue + if service_name is not None and service_name != nm: + continue + + if spec.service_type != 'osd': + size = spec.placement.get_target_count(self.cache.get_schedulable_hosts()) + else: + # osd counting is special + size = 0 + + sm[nm] = orchestrator.ServiceDescription( + spec=spec, + size=size, + running=0, + events=self.events.get_for_service(spec.service_name()), + created=self.spec_store.spec_created[nm], + deleted=self.spec_store.spec_deleted.get(nm, None), + virtual_ip=spec.get_virtual_ip(), + ports=spec.get_port_start(), + ) + if spec.service_type == 'ingress': + # ingress has 2 daemons running per host + # but only if it's the full ingress service, not for keepalive-only + if not cast(IngressSpec, spec).keepalive_only: + sm[nm].size *= 2 + + # factor daemons into status + for h, dm in self.cache.get_daemons_with_volatile_status(): + for name, dd in dm.items(): + assert dd.hostname is not None, f'no hostname for {dd!r}' + assert dd.daemon_type is not None, f'no daemon_type for {dd!r}' + + n: str = dd.service_name() + + if ( + service_type + and service_type != daemon_type_to_service(dd.daemon_type) + ): + continue + if service_name and service_name != n: + continue + + if n not in sm: + # new unmanaged service + spec = ServiceSpec( + unmanaged=True, + service_type=daemon_type_to_service(dd.daemon_type), + service_id=dd.service_id(), + ) + sm[n] = orchestrator.ServiceDescription( + last_refresh=dd.last_refresh, + container_image_id=dd.container_image_id, + container_image_name=dd.container_image_name, + spec=spec, + size=0, + ) + + if dd.status == DaemonDescriptionStatus.running: + sm[n].running += 1 + if dd.daemon_type == 'osd': + # The osd count can't be determined by the Placement spec. + # An actual/expected representation cannot be determined + # here, so we're setting running = size for now.
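+ # Illustrative example (counts hypothetical): with three running OSD + # daemons under an osd spec, the service here reports + # size == running == 3, whereas counted services take size from their + # placement spec.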
+ sm[n].size += 1 + if ( + not sm[n].last_refresh + or not dd.last_refresh + or dd.last_refresh < sm[n].last_refresh # type: ignore + ): + sm[n].last_refresh = dd.last_refresh + + return list(sm.values()) + + @handle_orch_error + def list_daemons(self, + service_name: Optional[str] = None, + daemon_type: Optional[str] = None, + daemon_id: Optional[str] = None, + host: Optional[str] = None, + refresh: bool = False) -> List[orchestrator.DaemonDescription]: + if refresh: + self._invalidate_daemons_and_kick_serve(host) + self.log.debug('Kicked serve() loop to refresh all daemons') + + result = [] + for h, dm in self.cache.get_daemons_with_volatile_status(): + if host and h != host: + continue + for name, dd in dm.items(): + if daemon_type is not None and daemon_type != dd.daemon_type: + continue + if daemon_id is not None and daemon_id != dd.daemon_id: + continue + if service_name is not None and service_name != dd.service_name(): + continue + if not dd.memory_request and dd.daemon_type in ['osd', 'mon']: + dd.memory_request = cast(Optional[int], self.get_foreign_ceph_option( + dd.name(), + f"{dd.daemon_type}_memory_target" + )) + result.append(dd) + return result + + @handle_orch_error + def service_action(self, action: str, service_name: str) -> List[str]: + if service_name not in self.spec_store.all_specs.keys(): + raise OrchestratorError(f'Invalid service name "{service_name}".' + + ' View currently running services using "ceph orch ls"') + dds: List[DaemonDescription] = self.cache.get_daemons_by_service(service_name) + if not dds: + raise OrchestratorError(f'No daemons exist under service name "{service_name}".' + + ' View currently running services using "ceph orch ls"') + if action == 'stop' and service_name.split('.')[0].lower() in ['mgr', 'mon', 'osd']: + return [f'Stopping entire {service_name} service is prohibited.'] + self.log.info('%s service %s' % (action.capitalize(), service_name)) + return [ + self._schedule_daemon_action(dd.name(), action) + for dd in dds + ] + + def _rotate_daemon_key(self, daemon_spec: CephadmDaemonDeploySpec) -> str: + self.log.info(f'Rotating authentication key for {daemon_spec.name()}') + rc, out, err = self.mon_command({ + 'prefix': 'auth get-or-create-pending', + 'entity': daemon_spec.entity_name(), + 'format': 'json', + }) + j = json.loads(out) + pending_key = j[0]['pending_key'] + + # deploy a new keyring file + if daemon_spec.daemon_type != 'osd': + daemon_spec = self.cephadm_services[daemon_type_to_service( + daemon_spec.daemon_type)].prepare_create(daemon_spec) + with self.async_timeout_handler(daemon_spec.host, f'cephadm deploy ({daemon_spec.daemon_type} daemon)'): + self.wait_async(CephadmServe(self)._create_daemon(daemon_spec, reconfig=True)) + + # try to be clever, or fall back to restarting the daemon + rc = -1 + if daemon_spec.daemon_type == 'osd': + rc, out, err = self.tool_exec( + args=['ceph', 'tell', daemon_spec.name(), 'rotate-stored-key', '-i', '-'], + stdin=pending_key.encode() + ) + if not rc: + rc, out, err = self.tool_exec( + args=['ceph', 'tell', daemon_spec.name(), 'rotate-key', '-i', '-'], + stdin=pending_key.encode() + ) + elif daemon_spec.daemon_type == 'mds': + rc, out, err = self.tool_exec( + args=['ceph', 'tell', daemon_spec.name(), 'rotate-key', '-i', '-'], + stdin=pending_key.encode() + ) + elif ( + daemon_spec.daemon_type == 'mgr' + and daemon_spec.daemon_id == self.get_mgr_id() + ): + rc, out, err = self.tool_exec( + args=['ceph', 'tell', daemon_spec.name(), 'rotate-key', '-i', '-'], + stdin=pending_key.encode() + ) + 
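+ # A nonzero rc at this point means none of the targeted 'rotate-key' + # attempts above applied cleanly, so fall back to restarting the + # daemon so it starts up with the newly deployed keyring.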
if rc: + self._daemon_action(daemon_spec, 'restart') + + return f'Rotated key for {daemon_spec.name()}' + + def _daemon_action(self, + daemon_spec: CephadmDaemonDeploySpec, + action: str, + image: Optional[str] = None) -> str: + self._daemon_action_set_image(action, image, daemon_spec.daemon_type, + daemon_spec.daemon_id) + + if (action == 'redeploy' or action == 'restart') and self.daemon_is_self(daemon_spec.daemon_type, + daemon_spec.daemon_id): + self.mgr_service.fail_over() + return '' # unreachable + + if action == 'rotate-key': + return self._rotate_daemon_key(daemon_spec) + + if action == 'redeploy' or action == 'reconfig': + if daemon_spec.daemon_type != 'osd': + daemon_spec = self.cephadm_services[daemon_type_to_service( + daemon_spec.daemon_type)].prepare_create(daemon_spec) + else: + # for OSDs, we still need to update config, just not carry out the full + # prepare_create function + daemon_spec.final_config, daemon_spec.deps = self.osd_service.generate_config( + daemon_spec) + with self.async_timeout_handler(daemon_spec.host, f'cephadm deploy ({daemon_spec.daemon_type} daemon)'): + return self.wait_async( + CephadmServe(self)._create_daemon(daemon_spec, reconfig=(action == 'reconfig'))) + + actions = { + 'start': ['reset-failed', 'start'], + 'stop': ['stop'], + 'restart': ['reset-failed', 'restart'], + } + name = daemon_spec.name() + for a in actions[action]: + try: + with self.async_timeout_handler(daemon_spec.host, f'cephadm unit --name {name}'): + out, err, code = self.wait_async(CephadmServe(self)._run_cephadm( + daemon_spec.host, name, 'unit', + ['--name', name, a])) + except Exception: + self.log.exception(f'`{daemon_spec.host}: cephadm unit {name} {a}` failed') + self.cache.invalidate_host_daemons(daemon_spec.host) + msg = "{} {} from host '{}'".format(action, name, daemon_spec.host) + self.events.for_daemon(name, 'INFO', msg) + return msg + + def _daemon_action_set_image(self, action: str, image: Optional[str], daemon_type: str, daemon_id: str) -> None: + if image is not None: + if action != 'redeploy': + raise OrchestratorError( + f'Cannot execute {action} with new image. `action` needs to be `redeploy`') + if daemon_type not in CEPH_IMAGE_TYPES: + raise OrchestratorError( + f'Cannot redeploy {daemon_type}.{daemon_id} with a new image: Supported ' + f'types are: {", ".join(CEPH_IMAGE_TYPES)}') + + self.check_mon_command({ + 'prefix': 'config set', + 'name': 'container_image', + 'value': image, + 'who': utils.name_to_config_section(daemon_type + '.' 
+ daemon_id), + }) + + @handle_orch_error + def daemon_action(self, action: str, daemon_name: str, image: Optional[str] = None) -> str: + d = self.cache.get_daemon(daemon_name) + assert d.daemon_type is not None + assert d.daemon_id is not None + + if (action == 'redeploy' or action == 'restart') and self.daemon_is_self(d.daemon_type, d.daemon_id) \ + and not self.mgr_service.mgr_map_has_standby(): + raise OrchestratorError( + f'Unable to schedule redeploy for {daemon_name}: No standby MGRs') + + if action == 'rotate-key': + if d.daemon_type not in ['mgr', 'osd', 'mds', + 'rgw', 'crash', 'nfs', 'rbd-mirror', 'iscsi']: + raise OrchestratorError( + f'key rotation not supported for {d.daemon_type}' + ) + + self._daemon_action_set_image(action, image, d.daemon_type, d.daemon_id) + + self.log.info(f'Schedule {action} daemon {daemon_name}') + return self._schedule_daemon_action(daemon_name, action) + + def daemon_is_self(self, daemon_type: str, daemon_id: str) -> bool: + return daemon_type == 'mgr' and daemon_id == self.get_mgr_id() + + def get_active_mgr(self) -> DaemonDescription: + return self.mgr_service.get_active_daemon(self.cache.get_daemons_by_type('mgr')) + + def get_active_mgr_digests(self) -> List[str]: + digests = self.mgr_service.get_active_daemon( + self.cache.get_daemons_by_type('mgr')).container_image_digests + return digests if digests else [] + + def _schedule_daemon_action(self, daemon_name: str, action: str) -> str: + dd = self.cache.get_daemon(daemon_name) + assert dd.daemon_type is not None + assert dd.daemon_id is not None + assert dd.hostname is not None + if (action == 'redeploy' or action == 'restart') and self.daemon_is_self(dd.daemon_type, dd.daemon_id) \ + and not self.mgr_service.mgr_map_has_standby(): + raise OrchestratorError( + f'Unable to schedule redeploy for {daemon_name}: No standby MGRs') + self.cache.schedule_daemon_action(dd.hostname, dd.name(), action) + self.cache.save_host(dd.hostname) + msg = "Scheduled to {} {} on host '{}'".format(action, daemon_name, dd.hostname) + self._kick_serve_loop() + return msg + + @handle_orch_error + def remove_daemons(self, names): + # type: (List[str]) -> List[str] + args = [] + for host, dm in self.cache.daemons.items(): + for name in names: + if name in dm: + args.append((name, host)) + if not args: + raise OrchestratorError('Unable to find daemon(s) %s' % (names)) + self.log.info('Remove daemons %s' % ' '.join([a[0] for a in args])) + return self._remove_daemons(args) + + @handle_orch_error + def remove_service(self, service_name: str, force: bool = False) -> str: + self.log.info('Remove service %s' % service_name) + self._trigger_preview_refresh(service_name=service_name) + if service_name in self.spec_store: + if self.spec_store[service_name].spec.service_type in ('mon', 'mgr'): + return f'Unable to remove {service_name} service.\n' \ + f'Note, you might want to mark the {service_name} service as "unmanaged"' + else: + return f"Invalid service '{service_name}'. Use 'ceph orch ls' to list available services.\n" + + # Report list of affected OSDs? 
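+ # Sketch of the refusal built below (host and OSD ids hypothetical): + # + # If osd.foo is removed then the following OSDs will remain, --force to proceed anyway + # host host1: osd.1 osd.2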
+ if not force and service_name.startswith('osd.'): + osds_msg = {} + for h, dm in self.cache.get_daemons_with_volatile_status(): + osds_to_remove = [] + for name, dd in dm.items(): + if dd.daemon_type == 'osd' and dd.service_name() == service_name: + osds_to_remove.append(str(dd.daemon_id)) + if osds_to_remove: + osds_msg[h] = osds_to_remove + if osds_msg: + msg = '' + for h, ls in osds_msg.items(): + msg += f'\thost {h}: {" ".join([f"osd.{id}" for id in ls])}' + raise OrchestratorError( + f'If {service_name} is removed then the following OSDs will remain, --force to proceed anyway\n{msg}') + + found = self.spec_store.rm(service_name) + if found and service_name.startswith('osd.'): + self.spec_store.finally_rm(service_name) + self._kick_serve_loop() + return f'Removed service {service_name}' + + @handle_orch_error + def get_inventory(self, host_filter: Optional[orchestrator.InventoryFilter] = None, refresh: bool = False) -> List[orchestrator.InventoryHost]: + """ + Return the storage inventory of hosts matching the given filter. + + :param host_filter: host filter + + TODO: + - add filtering by label + """ + if refresh: + if host_filter and host_filter.hosts: + for h in host_filter.hosts: + self.log.debug(f'will refresh {h} devs') + self.cache.invalidate_host_devices(h) + self.cache.invalidate_host_networks(h) + else: + for h in self.cache.get_hosts(): + self.log.debug(f'will refresh {h} devs') + self.cache.invalidate_host_devices(h) + self.cache.invalidate_host_networks(h) + + self.event.set() + self.log.debug('Kicked serve() loop to refresh devices') + + result = [] + for host, dls in self.cache.devices.items(): + if host_filter and host_filter.hosts and host not in host_filter.hosts: + continue + result.append(orchestrator.InventoryHost(host, + inventory.Devices(dls))) + return result + + @handle_orch_error + def zap_device(self, host: str, path: str) -> str: + """Zap a device on a managed host. + + Use ceph-volume zap to return a device to an unused/free state. + + Args: + host (str): hostname of the cluster host + path (str): device path + + Raises: + OrchestratorError: host is not a cluster host + OrchestratorError: host is in maintenance and therefore unavailable + OrchestratorError: device path not found on the host + OrchestratorError: device is known to a different ceph cluster + OrchestratorError: device holds active osd + OrchestratorError: device cache hasn't been populated yet. + + Returns: + str: output from the zap command + """ + + self.log.info('Zap device %s:%s' % (host, path)) + + if host not in self.inventory.keys(): + raise OrchestratorError( + f"Host '{host}' is not a member of the cluster") + + host_info = self.inventory._inventory.get(host, {}) + if host_info.get('status', '').lower() == 'maintenance': + raise OrchestratorError( + f"Host '{host}' is in maintenance mode, which prevents any actions against it.") + + if host not in self.cache.devices: + raise OrchestratorError( + f"Host '{host}' hasn't been scanned yet to determine its inventory.
Please try again later.") + + host_devices = self.cache.devices[host] + path_found = False + osd_id_list: List[str] = [] + + for dev in host_devices: + if dev.path == path: + path_found = True + break + if not path_found: + raise OrchestratorError( + f"Device path '{path}' not found on host '{host}'") + + if osd_id_list: + dev_name = os.path.basename(path) + active_osds: List[str] = [] + for osd_id in osd_id_list: + metadata = self.get_metadata('osd', str(osd_id)) + if metadata: + if metadata.get('hostname', '') == host and dev_name in metadata.get('devices', '').split(','): + active_osds.append("osd." + osd_id) + if active_osds: + raise OrchestratorError( + f"Unable to zap: device '{path}' on {host} has {len(active_osds)} active " + f"OSD{'s' if len(active_osds) > 1 else ''}" + f" ({', '.join(active_osds)}). Use 'ceph orch osd rm' first.") + + cv_args = ['--', 'lvm', 'zap', '--destroy', path] + with self.async_timeout_handler(host, f'cephadm ceph-volume {" ".join(cv_args)}'): + out, err, code = self.wait_async(CephadmServe(self)._run_cephadm( + host, 'osd', 'ceph-volume', cv_args, error_ok=True)) + + self.cache.invalidate_host_devices(host) + self.cache.invalidate_host_networks(host) + if code: + raise OrchestratorError('Zap failed: %s' % '\n'.join(out + err)) + msg = f'zap successful for {path} on {host}' + self.log.info(msg) + + return msg + '\n' + + @handle_orch_error + def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> List[str]: + """ + Blink a device light. Calling something like:: + + lsmcli local-disk-ident-led-on --path $path + + If you must, you can customize this via:: + + ceph config-key set mgr/cephadm/blink_device_light_cmd '' + ceph config-key set mgr/cephadm//blink_device_light_cmd '' + + See templates/blink_device_light_cmd.j2 + """ + @forall_hosts + def blink(host: str, dev: str, path: str) -> str: + cmd_line = self.template.render('blink_device_light_cmd.j2', + { + 'on': on, + 'ident_fault': ident_fault, + 'dev': dev, + 'path': path + }, + host=host) + cmd_args = shlex.split(cmd_line) + + with self.async_timeout_handler(host, f'cephadm shell -- {" ".join(cmd_args)}'): + out, err, code = self.wait_async(CephadmServe(self)._run_cephadm( + host, 'osd', 'shell', ['--'] + cmd_args, + error_ok=True)) + if code: + raise OrchestratorError( + 'Unable to affect %s light for %s:%s. Command: %s' % ( + ident_fault, host, dev, ' '.join(cmd_args))) + self.log.info('Set %s light for %s:%s %s' % ( + ident_fault, host, dev, 'on' if on else 'off')) + return "Set %s light for %s:%s %s" % ( + ident_fault, host, dev, 'on' if on else 'off') + + return blink(locs) + + def get_osd_uuid_map(self, only_up=False): + # type: (bool) -> Dict[str, str] + osd_map = self.get('osd_map') + r = {} + for o in osd_map['osds']: + # only include OSDs that have ever started in this map. this way + # an interrupted osd create can be repeated and succeed the second + # time around. 
+ osd_id = o.get('osd') + if osd_id is None: + raise OrchestratorError("Could not retrieve osd_id from osd_map") + if not only_up: + r[str(osd_id)] = o.get('uuid', '') + return r + + def get_osd_by_id(self, osd_id: int) -> Optional[Dict[str, Any]]: + osd = [x for x in self.get('osd_map')['osds'] + if x['osd'] == osd_id] + + if len(osd) != 1: + return None + + return osd[0] + + def _trigger_preview_refresh(self, + specs: Optional[List[DriveGroupSpec]] = None, + service_name: Optional[str] = None, + ) -> None: + # Only trigger a refresh when a spec has changed + trigger_specs = [] + if specs: + for spec in specs: + preview_spec = self.spec_store.spec_preview.get(spec.service_name()) + # if the to-be-previewed spec differs from the actual spec we need to + # trigger a refresh; if the spec has been removed (== None) we need to + # refresh as well. + if not preview_spec or spec != preview_spec: + trigger_specs.append(spec) + if service_name: + trigger_specs = [cast(DriveGroupSpec, self.spec_store.spec_preview.get(service_name))] + if not any(trigger_specs): + return None + + refresh_hosts = self.osd_service.resolve_hosts_for_osdspecs(specs=trigger_specs) + for host in refresh_hosts: + self.log.info(f"Marking host: {host} for OSDSpec preview refresh.") + self.cache.osdspec_previews_refresh_queue.append(host) + + @handle_orch_error + def apply_drivegroups(self, specs: List[DriveGroupSpec]) -> List[str]: + """ + Deprecated. Please use `apply()` instead. + + Keeping this around to be compatible with mgr/dashboard + """ + return [self._apply(spec) for spec in specs] + + @handle_orch_error + def create_osds(self, drive_group: DriveGroupSpec) -> str: + hosts: List[HostSpec] = self.inventory.all_specs() + filtered_hosts: List[str] = drive_group.placement.filter_matching_hostspecs(hosts) + if not filtered_hosts: + return "Invalid 'host:device' spec: host not found in cluster. Please check 'ceph orch host ls' for available hosts" + return self.osd_service.create_from_spec(drive_group) + + def _preview_osdspecs(self, + osdspecs: Optional[List[DriveGroupSpec]] = None + ) -> dict: + if not osdspecs: + return {'n/a': [{'error': True, + 'message': 'No OSDSpec or matching hosts found.'}]} + matching_hosts = self.osd_service.resolve_hosts_for_osdspecs(specs=osdspecs) + if not matching_hosts: + return {'n/a': [{'error': True, + 'message': 'No OSDSpec or matching hosts found.'}]} + # Is any host still loading previews or still in the queue to be previewed? + pending_hosts = {h for h in self.cache.loading_osdspec_preview if h in matching_hosts} + if pending_hosts or any(item in self.cache.osdspec_previews_refresh_queue for item in matching_hosts): + # Report 'pending' when any of the matching hosts is still loading previews (flag is True) + return {'n/a': [{'error': True, + 'message': 'Preview data is being generated..
' + 'Please re-run this command in a bit.'}]} + # drop all keys that are not in search_hosts and only select reports that match the requested osdspecs + previews_for_specs = {} + for host, raw_reports in self.cache.osdspec_previews.items(): + if host not in matching_hosts: + continue + osd_reports = [] + for osd_report in raw_reports: + if osd_report.get('osdspec') in [x.service_id for x in osdspecs]: + osd_reports.append(osd_report) + previews_for_specs.update({host: osd_reports}) + return previews_for_specs + + def _calc_daemon_deps(self, + spec: Optional[ServiceSpec], + daemon_type: str, + daemon_id: str) -> List[str]: + + def get_daemon_names(daemons: List[str]) -> List[str]: + daemon_names = [] + for daemon_type in daemons: + for dd in self.cache.get_daemons_by_type(daemon_type): + daemon_names.append(dd.name()) + return daemon_names + + alertmanager_user, alertmanager_password = self._get_alertmanager_credentials() + prometheus_user, prometheus_password = self._get_prometheus_credentials() + + deps = [] + if daemon_type == 'haproxy': + # because cephadm creates new daemon instances whenever + # port or ip changes, identifying daemons by name is + # sufficient to detect changes. + if not spec: + return [] + ingress_spec = cast(IngressSpec, spec) + assert ingress_spec.backend_service + daemons = self.cache.get_daemons_by_service(ingress_spec.backend_service) + deps = [d.name() for d in daemons] + elif daemon_type == 'keepalived': + # because cephadm creates new daemon instances whenever + # port or ip changes, identifying daemons by name is + # sufficient to detect changes. + if not spec: + return [] + daemons = self.cache.get_daemons_by_service(spec.service_name()) + deps = [d.name() for d in daemons if d.daemon_type == 'haproxy'] + elif daemon_type == 'agent': + root_cert = '' + server_port = '' + try: + server_port = str(self.http_server.agent.server_port) + root_cert = self.http_server.agent.ssl_certs.get_root_cert() + except Exception: + pass + deps = sorted([self.get_mgr_ip(), server_port, root_cert, + str(self.device_enhanced_scan)]) + elif daemon_type == 'iscsi': + if spec: + iscsi_spec = cast(IscsiServiceSpec, spec) + deps = [self.iscsi_service.get_trusted_ips(iscsi_spec)] + else: + deps = [self.get_mgr_ip()] + elif daemon_type == 'prometheus': + # for prometheus we add the active mgr as an explicit dependency, + # this way we force a redeploy after a mgr failover + deps.append(self.get_active_mgr().name()) + deps.append(str(self.get_module_option_ex('prometheus', 'server_port', 9283))) + deps.append(str(self.service_discovery_port)) + # prometheus yaml configuration file (generated by prometheus.yml.j2) contains + # a scrape_configs section for each service type. This should be included only + # when at least one daemon of the corresponding service is running. Therefore, + # an explicit dependency is added for each service-type to force a reconfig + # whenever the number of daemons for those service-type changes from 0 to greater + # than zero and vice versa. 
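+ # e.g. with node-exporter daemons running but no alertmanager yet, the list + # built below contains 'node-exporter' only; deploying the first alertmanager + # changes the dep list, which in turn triggers a prometheus reconfig + # (illustrative of the mechanism, not an exhaustive list of dependencies)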
+ deps += [s for s in ['node-exporter', 'alertmanager'] + if self.cache.get_daemons_by_service(s)] + if len(self.cache.get_daemons_by_type('ingress')) > 0: + deps.append('ingress') + # add dependency on ceph-exporter daemons + deps += [d.name() for d in self.cache.get_daemons_by_service('ceph-exporter')] + if self.secure_monitoring_stack: + if prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') + if alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') + elif daemon_type == 'grafana': + deps += get_daemon_names(['prometheus', 'loki']) + if self.secure_monitoring_stack and prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') + elif daemon_type == 'alertmanager': + deps += get_daemon_names(['mgr', 'alertmanager', 'snmp-gateway']) + if self.secure_monitoring_stack and alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') + elif daemon_type == 'promtail': + deps += get_daemon_names(['loki']) + else: + # TODO(redo): some error message! + pass + + if daemon_type in ['prometheus', 'node-exporter', 'alertmanager', 'grafana']: + deps.append(f'secure_monitoring_stack:{self.secure_monitoring_stack}') + + return sorted(deps) + + @forall_hosts + def _remove_daemons(self, name: str, host: str) -> str: + return CephadmServe(self)._remove_daemon(name, host) + + def _check_pool_exists(self, pool: str, service_name: str) -> None: + logger.info(f'Checking pool "{pool}" exists for service {service_name}') + if not self.rados.pool_exists(pool): + raise OrchestratorError(f'Cannot find pool "{pool}" for ' + f'service {service_name}') + + def _add_daemon(self, + daemon_type: str, + spec: ServiceSpec) -> List[str]: + """ + Add (and place) a daemon. Require explicit host placement. Do not + schedule, and do not apply the related scheduling limitations. + """ + if spec.service_name() not in self.spec_store: + raise OrchestratorError('Unable to add a Daemon without Service.\n' + 'Please use `ceph orch apply ...` to create a Service.\n' + 'Note, you might want to create the service with "unmanaged=true"') + + self.log.debug('_add_daemon %s spec %s' % (daemon_type, spec.placement)) + if not spec.placement.hosts: + raise OrchestratorError('must specify host(s) to deploy on') + count = spec.placement.count or len(spec.placement.hosts) + daemons = self.cache.get_daemons_by_service(spec.service_name()) + return self._create_daemons(daemon_type, spec, daemons, + spec.placement.hosts, count) + + def _create_daemons(self, + daemon_type: str, + spec: ServiceSpec, + daemons: List[DaemonDescription], + hosts: List[HostPlacementSpec], + count: int) -> List[str]: + if count > len(hosts): + raise OrchestratorError('too few hosts: want %d, have %s' % ( + count, hosts)) + + did_config = False + service_type = daemon_type_to_service(daemon_type) + + args = [] # type: List[CephadmDaemonDeploySpec] + for host, network, name in hosts: + daemon_id = self.get_unique_name(daemon_type, host, daemons, + prefix=spec.service_id, + forcename=name) + + if not did_config: + self.cephadm_services[service_type].config(spec) + did_config = True + + daemon_spec = self.cephadm_services[service_type].make_daemon_spec( + host, daemon_id, network, spec, + # NOTE: this does not consider port conflicts! 
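+ # (e.g. two services pinning the same fixed port on one host are both + # placed here; the clash only surfaces once the daemons actually start)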
+ ports=spec.get_port_start()) + self.log.debug('Placing %s.%s on host %s' % ( + daemon_type, daemon_id, host)) + args.append(daemon_spec) + + # add to daemon list so next name(s) will also be unique + sd = orchestrator.DaemonDescription( + hostname=host, + daemon_type=daemon_type, + daemon_id=daemon_id, + ) + daemons.append(sd) + + @ forall_hosts + def create_func_map(*args: Any) -> str: + daemon_spec = self.cephadm_services[daemon_type].prepare_create(*args) + with self.async_timeout_handler(daemon_spec.host, f'cephadm deploy ({daemon_spec.daemon_type} daemon)'): + return self.wait_async(CephadmServe(self)._create_daemon(daemon_spec)) + + return create_func_map(args) + + @handle_orch_error + def add_daemon(self, spec: ServiceSpec) -> List[str]: + ret: List[str] = [] + try: + with orchestrator.set_exception_subject('service', spec.service_name(), overwrite=True): + for d_type in service_to_daemon_types(spec.service_type): + ret.extend(self._add_daemon(d_type, spec)) + return ret + except OrchestratorError as e: + self.events.from_orch_error(e) + raise + + def _get_alertmanager_credentials(self) -> Tuple[str, str]: + user = self.get_store(AlertmanagerService.USER_CFG_KEY) + password = self.get_store(AlertmanagerService.PASS_CFG_KEY) + if user is None or password is None: + user = 'admin' + password = 'admin' + self.set_store(AlertmanagerService.USER_CFG_KEY, user) + self.set_store(AlertmanagerService.PASS_CFG_KEY, password) + return (user, password) + + def _get_prometheus_credentials(self) -> Tuple[str, str]: + user = self.get_store(PrometheusService.USER_CFG_KEY) + password = self.get_store(PrometheusService.PASS_CFG_KEY) + if user is None or password is None: + user = 'admin' + password = 'admin' + self.set_store(PrometheusService.USER_CFG_KEY, user) + self.set_store(PrometheusService.PASS_CFG_KEY, password) + return (user, password) + + @handle_orch_error + def set_prometheus_access_info(self, user: str, password: str) -> str: + self.set_store(PrometheusService.USER_CFG_KEY, user) + self.set_store(PrometheusService.PASS_CFG_KEY, password) + return 'prometheus credentials updated correctly' + + @handle_orch_error + def set_alertmanager_access_info(self, user: str, password: str) -> str: + self.set_store(AlertmanagerService.USER_CFG_KEY, user) + self.set_store(AlertmanagerService.PASS_CFG_KEY, password) + return 'alertmanager credentials updated correctly' + + @handle_orch_error + def get_prometheus_access_info(self) -> Dict[str, str]: + user, password = self._get_prometheus_credentials() + return {'user': user, + 'password': password, + 'certificate': self.http_server.service_discovery.ssl_certs.get_root_cert()} + + @handle_orch_error + def get_alertmanager_access_info(self) -> Dict[str, str]: + user, password = self._get_alertmanager_credentials() + return {'user': user, + 'password': password, + 'certificate': self.http_server.service_discovery.ssl_certs.get_root_cert()} + + @handle_orch_error + def apply_mon(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + def _apply(self, spec: GenericSpec) -> str: + if spec.service_type == 'host': + return self._add_host(cast(HostSpec, spec)) + + if spec.service_type == 'osd': + # _trigger preview refresh needs to be smart and + # should only refresh if a change has been detected + self._trigger_preview_refresh(specs=[cast(DriveGroupSpec, spec)]) + + return self._apply_service_spec(cast(ServiceSpec, spec)) + + def _get_candidate_hosts(self, placement: PlacementSpec) -> List[str]: + """Return a list of candidate hosts according to the 
placement specification.""" + all_hosts = self.cache.get_schedulable_hosts() + candidates = [] + if placement.hosts: + candidates = [h.hostname for h in placement.hosts if h.hostname in placement.hosts] + elif placement.label: + candidates = [x.hostname for x in [h for h in all_hosts if placement.label in h.labels]] + elif placement.host_pattern: + candidates = [x for x in placement.filter_matching_hostspecs(all_hosts)] + elif (placement.count is not None or placement.count_per_host is not None): + candidates = [x.hostname for x in all_hosts] + return [h for h in candidates if not self.cache.is_host_draining(h)] + + def _validate_one_shot_placement_spec(self, spec: PlacementSpec) -> None: + """Validate placement specification for TunedProfileSpec and ClientKeyringSpec.""" + if spec.count is not None: + raise OrchestratorError( + "Placement 'count' field is no supported for this specification.") + if spec.count_per_host is not None: + raise OrchestratorError( + "Placement 'count_per_host' field is no supported for this specification.") + if spec.hosts: + all_hosts = [h.hostname for h in self.inventory.all_specs()] + invalid_hosts = [h.hostname for h in spec.hosts if h.hostname not in all_hosts] + if invalid_hosts: + raise OrchestratorError(f"Found invalid host(s) in placement section: {invalid_hosts}. " + f"Please check 'ceph orch host ls' for available hosts.") + elif not self._get_candidate_hosts(spec): + raise OrchestratorError("Invalid placement specification. No host(s) matched placement spec.\n" + "Please check 'ceph orch host ls' for available hosts.\n" + "Note: draining hosts are excluded from the candidate list.") + + def _validate_tunedprofile_settings(self, spec: TunedProfileSpec) -> Dict[str, List[str]]: + candidate_hosts = spec.placement.filter_matching_hostspecs(self.inventory.all_specs()) + invalid_options: Dict[str, List[str]] = {} + for host in candidate_hosts: + host_sysctl_options = self.cache.get_facts(host).get('sysctl_options', {}) + invalid_options[host] = [] + for option in spec.settings: + if option not in host_sysctl_options: + invalid_options[host].append(option) + return invalid_options + + def _validate_tuned_profile_spec(self, spec: TunedProfileSpec) -> None: + if not spec.settings: + raise OrchestratorError("Invalid spec: settings section cannot be empty.") + self._validate_one_shot_placement_spec(spec.placement) + invalid_options = self._validate_tunedprofile_settings(spec) + if any(e for e in invalid_options.values()): + raise OrchestratorError( + f'Failed to apply tuned profile. Invalid sysctl option(s) for host(s) detected: {invalid_options}') + + @handle_orch_error + def apply_tuned_profiles(self, specs: List[TunedProfileSpec], no_overwrite: bool = False) -> str: + outs = [] + for spec in specs: + self._validate_tuned_profile_spec(spec) + if no_overwrite and self.tuned_profiles.exists(spec.profile_name): + outs.append( + f"Tuned profile '{spec.profile_name}' already exists (--no-overwrite was passed)") + else: + # done, let's save the specs + self.tuned_profiles.add_profile(spec) + outs.append(f'Saved tuned profile {spec.profile_name}') + self._kick_serve_loop() + return '\n'.join(outs) + + @handle_orch_error + def rm_tuned_profile(self, profile_name: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. 
Nothing to remove.') + self.tuned_profiles.rm_profile(profile_name) + self._kick_serve_loop() + return f'Removed tuned profile {profile_name}' + + @handle_orch_error + def tuned_profile_ls(self) -> List[TunedProfileSpec]: + return self.tuned_profiles.list_profiles() + + @handle_orch_error + def tuned_profile_add_setting(self, profile_name: str, setting: str, value: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. Cannot add setting.') + self.tuned_profiles.add_setting(profile_name, setting, value) + self._kick_serve_loop() + return f'Added setting {setting} with value {value} to tuned profile {profile_name}' + + @handle_orch_error + def tuned_profile_rm_setting(self, profile_name: str, setting: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. Cannot remove setting.') + self.tuned_profiles.rm_setting(profile_name, setting) + self._kick_serve_loop() + return f'Removed setting {setting} from tuned profile {profile_name}' + + @handle_orch_error + def service_discovery_dump_cert(self) -> str: + root_cert = self.get_store(ServiceDiscovery.KV_STORE_SD_ROOT_CERT) + if not root_cert: + raise OrchestratorError('No certificate found for service discovery') + return root_cert + + def set_health_warning(self, name: str, summary: str, count: int, detail: List[str]) -> None: + self.health_checks[name] = { + 'severity': 'warning', + 'summary': summary, + 'count': count, + 'detail': detail, + } + self.set_health_checks(self.health_checks) + + def remove_health_warning(self, name: str) -> None: + if name in self.health_checks: + del self.health_checks[name] + self.set_health_checks(self.health_checks) + + def _plan(self, spec: ServiceSpec) -> dict: + if spec.service_type == 'osd': + return {'service_name': spec.service_name(), + 'service_type': spec.service_type, + 'data': self._preview_osdspecs(osdspecs=[cast(DriveGroupSpec, spec)])} + + svc = self.cephadm_services[spec.service_type] + ha = HostAssignment( + spec=spec, + hosts=self.cache.get_schedulable_hosts(), + unreachable_hosts=self.cache.get_unreachable_hosts(), + draining_hosts=self.cache.get_draining_hosts(), + networks=self.cache.networks, + daemons=self.cache.get_daemons_by_service(spec.service_name()), + allow_colo=svc.allow_colo(), + rank_map=self.spec_store[spec.service_name()].rank_map if svc.ranked() else None + ) + ha.validate() + hosts, to_add, to_remove = ha.place() + + return { + 'service_name': spec.service_name(), + 'service_type': spec.service_type, + 'add': [hs.hostname for hs in to_add], + 'remove': [d.name() for d in to_remove] + } + + @handle_orch_error + def plan(self, specs: Sequence[GenericSpec]) -> List: + results = [{'warning': 'WARNING! Dry-Runs are snapshots of a certain point in time and are bound \n' + 'to the current inventory setup. If any of these conditions change, the \n' + 'preview will be invalid. Please make sure to have a minimal \n' + 'timeframe between planning and applying the specs.'}] + if any([spec.service_type == 'host' for spec in specs]): + return [{'error': 'Found . 
Previews that include Host Specifications are not supported, yet.'}] + for spec in specs: + results.append(self._plan(cast(ServiceSpec, spec))) + return results + + def _apply_service_spec(self, spec: ServiceSpec) -> str: + if spec.placement.is_empty(): + # fill in default placement + defaults = { + 'mon': PlacementSpec(count=5), + 'mgr': PlacementSpec(count=2), + 'mds': PlacementSpec(count=2), + 'rgw': PlacementSpec(count=2), + 'ingress': PlacementSpec(count=2), + 'iscsi': PlacementSpec(count=1), + 'nvmeof': PlacementSpec(count=1), + 'rbd-mirror': PlacementSpec(count=2), + 'cephfs-mirror': PlacementSpec(count=1), + 'nfs': PlacementSpec(count=1), + 'grafana': PlacementSpec(count=1), + 'alertmanager': PlacementSpec(count=1), + 'prometheus': PlacementSpec(count=1), + 'node-exporter': PlacementSpec(host_pattern='*'), + 'ceph-exporter': PlacementSpec(host_pattern='*'), + 'loki': PlacementSpec(count=1), + 'promtail': PlacementSpec(host_pattern='*'), + 'crash': PlacementSpec(host_pattern='*'), + 'container': PlacementSpec(count=1), + 'snmp-gateway': PlacementSpec(count=1), + 'elasticsearch': PlacementSpec(count=1), + 'jaeger-agent': PlacementSpec(host_pattern='*'), + 'jaeger-collector': PlacementSpec(count=1), + 'jaeger-query': PlacementSpec(count=1) + } + spec.placement = defaults[spec.service_type] + elif spec.service_type in ['mon', 'mgr'] and \ + spec.placement.count is not None and \ + spec.placement.count < 1: + raise OrchestratorError('cannot scale %s service below 1' % ( + spec.service_type)) + + host_count = len(self.inventory.keys()) + max_count = self.max_count_per_host + + if spec.placement.count is not None: + if spec.service_type in ['mon', 'mgr']: + if spec.placement.count > max(5, host_count): + raise OrchestratorError( + (f'The maximum number of {spec.service_type} daemons allowed with {host_count} hosts is {max(5, host_count)}.')) + elif spec.service_type != 'osd': + if spec.placement.count > (max_count * host_count): + raise OrchestratorError((f'The maximum number of {spec.service_type} daemons allowed with {host_count} hosts is {host_count*max_count} ({host_count}x{max_count}).' + + ' This limit can be adjusted by changing the mgr/cephadm/max_count_per_host config option')) + + if spec.placement.count_per_host is not None and spec.placement.count_per_host > max_count and spec.service_type != 'osd': + raise OrchestratorError((f'The maximum count_per_host allowed is {max_count}.' + + ' This limit can be adjusted by changing the mgr/cephadm/max_count_per_host config option')) + + HostAssignment( + spec=spec, + hosts=self.inventory.all_specs(), # All hosts, even those without daemon refresh + unreachable_hosts=self.cache.get_unreachable_hosts(), + draining_hosts=self.cache.get_draining_hosts(), + networks=self.cache.networks, + daemons=self.cache.get_daemons_by_service(spec.service_name()), + allow_colo=self.cephadm_services[spec.service_type].allow_colo(), + ).validate() + + self.log.info('Saving service %s spec with placement %s' % ( + spec.service_name(), spec.placement.pretty_str())) + self.spec_store.save(spec) + self._kick_serve_loop() + return "Scheduled %s update..." % spec.service_name() + + @handle_orch_error + def apply(self, specs: Sequence[GenericSpec], no_overwrite: bool = False) -> List[str]: + results = [] + for spec in specs: + if no_overwrite: + if spec.service_type == 'host' and cast(HostSpec, spec).hostname in self.inventory: + results.append('Skipped %s host spec. 
To change %s spec omit --no-overwrite flag' + % (cast(HostSpec, spec).hostname, spec.service_type)) + continue + elif cast(ServiceSpec, spec).service_name() in self.spec_store: + results.append('Skipped %s service spec. To change %s spec omit --no-overwrite flag' + % (cast(ServiceSpec, spec).service_name(), cast(ServiceSpec, spec).service_name())) + continue + results.append(self._apply(spec)) + return results + + @handle_orch_error + def apply_mgr(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_mds(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_rgw(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_ingress(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_iscsi(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_rbd_mirror(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_nfs(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + def _get_dashboard_url(self): + # type: () -> str + return self.get('mgr_map').get('services', {}).get('dashboard', '') + + @handle_orch_error + def apply_prometheus(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_loki(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_promtail(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_node_exporter(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_ceph_exporter(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_crash(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_grafana(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_alertmanager(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_container(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def apply_snmp_gateway(self, spec: ServiceSpec) -> str: + return self._apply(spec) + + @handle_orch_error + def set_unmanaged(self, service_name: str, value: bool) -> str: + return self.spec_store.set_unmanaged(service_name, value) + + @handle_orch_error + def upgrade_check(self, image: str, version: str) -> str: + if self.inventory.get_host_with_state("maintenance"): + raise OrchestratorError("check aborted - you have hosts in maintenance state") + + if version: + target_name = self.container_image_base + ':v' + version + elif image: + target_name = image + else: + raise OrchestratorError('must specify either image or version') + + with self.async_timeout_handler(cmd=f'cephadm inspect-image (image {target_name})'): + image_info = self.wait_async(CephadmServe(self)._get_container_image_info(target_name)) + + ceph_image_version = image_info.ceph_version + if not ceph_image_version: + return f'Unable to extract ceph version from {target_name}.' 
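+ # image versions are reported in 'ceph -v' form, e.g. + #   'ceph version 18.2.2 (<sha1>) reef (stable)' + # so the split below keeps just the bare '18.2.2' component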
+ if ceph_image_version.startswith('ceph version '): + ceph_image_version = ceph_image_version.split(' ')[2] + version_error = self.upgrade._check_target_version(ceph_image_version) + if version_error: + return f'Incompatible upgrade: {version_error}' + + self.log.debug(f'image info {image} -> {image_info}') + r: dict = { + 'target_name': target_name, + 'target_id': image_info.image_id, + 'target_version': image_info.ceph_version, + 'needs_update': dict(), + 'up_to_date': list(), + 'non_ceph_image_daemons': list() + } + for host, dm in self.cache.daemons.items(): + for name, dd in dm.items(): + # check whether the daemon is already using the image we're checking + # upgrades against: if the "use_repo_digest" setting is true, compare + # the target's repo digests with the daemon's container digests; + # otherwise compare image names. + if ( + (self.use_repo_digest and dd.matches_digests(image_info.repo_digests)) + or (not self.use_repo_digest and dd.matches_image_name(image)) + ): + r['up_to_date'].append(dd.name()) + elif dd.daemon_type in CEPH_IMAGE_TYPES: + r['needs_update'][dd.name()] = { + 'current_name': dd.container_image_name, + 'current_id': dd.container_image_id, + 'current_version': dd.version, + } + else: + r['non_ceph_image_daemons'].append(dd.name()) + if self.use_repo_digest and image_info.repo_digests: + # FIXME: we assume the first digest is the best one to use + r['target_digest'] = image_info.repo_digests[0] + + return json.dumps(r, indent=4, sort_keys=True) + + @handle_orch_error + def upgrade_status(self) -> orchestrator.UpgradeStatusSpec: + return self.upgrade.upgrade_status() + + @handle_orch_error + def upgrade_ls(self, image: Optional[str], tags: bool, show_all_versions: Optional[bool]) -> Dict[Any, Any]: + return self.upgrade.upgrade_ls(image, tags, show_all_versions) + + @handle_orch_error + def upgrade_start(self, image: str, version: str, daemon_types: Optional[List[str]] = None, host_placement: Optional[str] = None, + services: Optional[List[str]] = None, limit: Optional[int] = None) -> str: + if self.inventory.get_host_with_state("maintenance"): + raise OrchestratorError("Upgrade aborted - you have host(s) in maintenance state") + if self.offline_hosts: + raise OrchestratorError( + f"Upgrade aborted - Some host(s) are currently offline: {self.offline_hosts}") + if daemon_types is not None and services is not None: + raise OrchestratorError('--daemon-types and --services are mutually exclusive') + if daemon_types is not None: + for dtype in daemon_types: + if dtype not in CEPH_UPGRADE_ORDER: + raise OrchestratorError(f'Upgrade aborted - Got unexpected daemon type "{dtype}".\n' + f'Viable daemon types for this command are: {utils.CEPH_TYPES + utils.GATEWAY_TYPES}') + if services is not None: + for service in services: + if service not in self.spec_store: + raise OrchestratorError(f'Upgrade aborted - Got unknown service name "{service}".\n' + f'Known services are: {self.spec_store.all_specs.keys()}') + hosts: Optional[List[str]] = None + if host_placement is not None: + all_hosts = list(self.inventory.all_specs()) + placement = PlacementSpec.from_string(host_placement) + hosts = placement.filter_matching_hostspecs(all_hosts) + if not hosts: + raise OrchestratorError( + f'Upgrade aborted - hosts parameter "{host_placement}" provided did not match any hosts') + + if limit is not None: + if limit < 1: + raise OrchestratorError( + f'Upgrade aborted -
--limit arg must be a positive integer, not {limit}') + + return self.upgrade.upgrade_start(image, version, daemon_types, hosts, services, limit) + + @handle_orch_error + def upgrade_pause(self) -> str: + return self.upgrade.upgrade_pause() + + @handle_orch_error + def upgrade_resume(self) -> str: + return self.upgrade.upgrade_resume() + + @handle_orch_error + def upgrade_stop(self) -> str: + return self.upgrade.upgrade_stop() + + @handle_orch_error + def remove_osds(self, osd_ids: List[str], + replace: bool = False, + force: bool = False, + zap: bool = False, + no_destroy: bool = False) -> str: + """ + Takes a list of OSDs and schedules them for removal. + The function that takes care of the actual removal is + process_removal_queue(). + """ + + daemons: List[orchestrator.DaemonDescription] = self.cache.get_daemons_by_type('osd') + to_remove_daemons = list() + for daemon in daemons: + if daemon.daemon_id in osd_ids: + to_remove_daemons.append(daemon) + if not to_remove_daemons: + return f"Unable to find OSDs: {osd_ids}" + + for daemon in to_remove_daemons: + assert daemon.daemon_id is not None + try: + self.to_remove_osds.enqueue(OSD(osd_id=int(daemon.daemon_id), + replace=replace, + force=force, + zap=zap, + no_destroy=no_destroy, + hostname=daemon.hostname, + process_started_at=datetime_now(), + remove_util=self.to_remove_osds.rm_util)) + except NotFoundError: + return f"Unable to find OSDs: {osd_ids}" + + # trigger the serve loop to initiate the removal + self._kick_serve_loop() + warning_zap = "" if zap else ("\nVG/LV for the OSDs won't be zapped (--zap wasn't passed).\n" + "Run the `ceph-volume lvm zap` command with `--destroy`" + " against the VG/LV if you want them to be destroyed.") + return f"Scheduled OSD(s) for removal.{warning_zap}" + + @handle_orch_error + def stop_remove_osds(self, osd_ids: List[str]) -> str: + """ + Stops a `removal` process for a list of OSDs. + This will revert their weight and remove them from the osds_to_remove queue + """ + for osd_id in osd_ids: + try: + self.to_remove_osds.rm(OSD(osd_id=int(osd_id), + remove_util=self.to_remove_osds.rm_util)) + except (NotFoundError, KeyError, ValueError): + return f'Unable to find OSD in the queue: {osd_id}' + + # trigger the serve loop to halt the removal + self._kick_serve_loop() + return "Stopped OSD(s) removal" + + @handle_orch_error + def remove_osds_status(self) -> List[OSD]: + """ + The CLI call to retrieve an osd removal report + """ + return self.to_remove_osds.all_osds() + + @handle_orch_error + def drain_host(self, hostname: str, force: bool = False, keep_conf_keyring: bool = False, zap_osd_devices: bool = False) -> str: + """ + Drain all daemons from a host.
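+ + Typically reached from the CLI as, for instance:: + + ceph orch host drain <hostname> [--force] [--zap-osd-devices] +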
+ :param hostname: host name + """ + + # if we drain the last admin host we could end up removing the only instance + # of the config and keyring and cause issues + if not force: + p = PlacementSpec(label=SpecialHostLabels.ADMIN) + admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) + if len(admin_hosts) == 1 and admin_hosts[0] == hostname: + raise OrchestratorValidationError(f"Host {hostname} is the last host with the '{SpecialHostLabels.ADMIN}'" + " label.\nDraining this host could cause the removal" + " of the last cluster config/keyring managed by cephadm.\n" + f"It is recommended to add the {SpecialHostLabels.ADMIN} label to another host" + " before completing this operation.\nIf you're certain this is" + " what you want rerun this command with --force.") + + self.add_host_label(hostname, '_no_schedule') + if not keep_conf_keyring: + self.add_host_label(hostname, SpecialHostLabels.DRAIN_CONF_KEYRING) + + daemons: List[orchestrator.DaemonDescription] = self.cache.get_daemons_by_host(hostname) + + osds_to_remove = [d.daemon_id for d in daemons if d.daemon_type == 'osd'] + self.remove_osds(osds_to_remove, zap=zap_osd_devices) + + daemons_table = "" + daemons_table += "{:<20} {:<15}\n".format("type", "id") + daemons_table += "{:<20} {:<15}\n".format("-" * 20, "-" * 15) + for d in daemons: + daemons_table += "{:<20} {:<15}\n".format(d.daemon_type, d.daemon_id) + + return "Scheduled to remove the following daemons from host '{}'\n{}".format(hostname, daemons_table) + + def trigger_connect_dashboard_rgw(self) -> None: + self.need_connect_dashboard_rgw = True + self.event.set() diff --git a/src/pybind/mgr/cephadm/offline_watcher.py b/src/pybind/mgr/cephadm/offline_watcher.py new file mode 100644 index 000000000..2b7751dfc --- /dev/null +++ b/src/pybind/mgr/cephadm/offline_watcher.py @@ -0,0 +1,60 @@ +import logging +from typing import List, Optional, TYPE_CHECKING + +import multiprocessing as mp +import threading + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + +logger = logging.getLogger(__name__) + + +class OfflineHostWatcher(threading.Thread): + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr = mgr + self.hosts: Optional[List[str]] = None + self.new_hosts: Optional[List[str]] = None + self.stop = False + self.event = threading.Event() + super(OfflineHostWatcher, self).__init__(target=self.run) + + def run(self) -> None: + self.thread_pool = mp.pool.ThreadPool(10) + while not self.stop: + # only need to take action if we have hosts to check + if self.hosts or self.new_hosts: + if self.new_hosts: + self.hosts = self.new_hosts + self.new_hosts = None + logger.debug(f'OfflineHostDetector: Checking if hosts: {self.hosts} are offline.') + assert self.hosts is not None + self.thread_pool.map(self.check_host, self.hosts) + self.event.wait(20) + self.event.clear() + self.thread_pool.close() + self.thread_pool.join() + + def check_host(self, host: str) -> None: + if host not in self.mgr.offline_hosts: + try: + self.mgr.ssh.check_execute_command(host, ['true'], log_command=self.mgr.log_refresh_metadata) + except Exception: + logger.debug(f'OfflineHostDetector: detected {host} to be offline') + # kick serve loop in case corrective action must be taken for offline host + self.mgr._kick_serve_loop() + + def set_hosts(self, hosts: List[str]) -> None: + hosts.sort() + if (not self.hosts or self.hosts != hosts) and hosts: + self.new_hosts = hosts + logger.debug( + f'OfflineHostDetector: Hosts to check if offline swapped to: {self.new_hosts}.')
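+ # wake the watcher thread right away so the new host list is checked + # without waiting out the poll interval (the run() loop sleeps up to + # 20 seconds between passes)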
self.wakeup() + + def wakeup(self) -> None: + self.event.set() + + def shutdown(self) -> None: + self.stop = True + self.wakeup() diff --git a/src/pybind/mgr/cephadm/registry.py b/src/pybind/mgr/cephadm/registry.py new file mode 100644 index 000000000..31e5fb23e --- /dev/null +++ b/src/pybind/mgr/cephadm/registry.py @@ -0,0 +1,65 @@ +import requests +from typing import List, Dict, Tuple +from requests import Response + + +class Registry: + + def __init__(self, url: str): + self._url: str = url + + @property + def api_domain(self) -> str: + if self._url == 'docker.io': + return 'registry-1.docker.io' + return self._url + + def get_token(self, response: Response) -> str: + realm, params = self.parse_www_authenticate(response.headers['Www-Authenticate']) + r = requests.get(realm, params=params) + r.raise_for_status() + ret = r.json() + if 'access_token' in ret: + return ret['access_token'] + if 'token' in ret: + return ret['token'] + raise ValueError(f'Unknown token reply {ret}') + + def parse_www_authenticate(self, text: str) -> Tuple[str, Dict[str, str]]: + # 'Www-Authenticate': 'Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:ceph/ceph:pull"' + r: Dict[str, str] = {} + for token in text.split(','): + key, value = token.split('=', 1) + r[key] = value.strip('"') + realm = r.pop('Bearer realm') + return realm, r + + def get_tags(self, image: str) -> List[str]: + tags = [] + headers = {'Accept': 'application/json'} + url = f'https://{self.api_domain}/v2/{image}/tags/list' + while True: + try: + r = requests.get(url, headers=headers) + except requests.exceptions.ConnectionError as e: + msg = f"Cannot get tags from url '{url}': {e}" + raise ValueError(msg) from e + if r.status_code == 401: + if 'Authorization' in headers: + raise ValueError('failed authentication') + token = self.get_token(r) + headers['Authorization'] = f'Bearer {token}' + continue + r.raise_for_status() + + new_tags = r.json()['tags'] + tags.extend(new_tags) + + if 'Link' not in r.headers: + break + + # strip < > brackets off and prepend the domain + url = f'https://{self.api_domain}' + r.headers['Link'].split(';')[0][1:-1] + continue + + return tags diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py new file mode 100644 index 000000000..6666d761e --- /dev/null +++ b/src/pybind/mgr/cephadm/schedule.py @@ -0,0 +1,481 @@ +import ipaddress +import hashlib +import logging +import random +from typing import List, Optional, Callable, TypeVar, Tuple, NamedTuple, Dict + +import orchestrator +from ceph.deployment.service_spec import ServiceSpec +from orchestrator._interface import DaemonDescription +from orchestrator import OrchestratorValidationError +from .utils import RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES + +logger = logging.getLogger(__name__) +T = TypeVar('T') + + +class DaemonPlacement(NamedTuple): + daemon_type: str + hostname: str + network: str = '' # for mons only + name: str = '' + ip: Optional[str] = None + ports: List[int] = [] + rank: Optional[int] = None + rank_generation: Optional[int] = None + + def __str__(self) -> str: + res = self.daemon_type + ':' + self.hostname + other = [] + if self.rank is not None: + other.append(f'rank={self.rank}.{self.rank_generation}') + if self.network: + other.append(f'network={self.network}') + if self.name: + other.append(f'name={self.name}') + if self.ports: + other.append(f'{self.ip or "*"}:{",".join(map(str, self.ports))}') + if other: + res += '(' + ' '.join(other) + ')' + return res + + def 
renumber_ports(self, n: int) -> 'DaemonPlacement': + return DaemonPlacement( + self.daemon_type, + self.hostname, + self.network, + self.name, + self.ip, + [p + n for p in self.ports], + self.rank, + self.rank_generation, + ) + + def assign_rank(self, rank: int, gen: int) -> 'DaemonPlacement': + return DaemonPlacement( + self.daemon_type, + self.hostname, + self.network, + self.name, + self.ip, + self.ports, + rank, + gen, + ) + + def assign_name(self, name: str) -> 'DaemonPlacement': + return DaemonPlacement( + self.daemon_type, + self.hostname, + self.network, + name, + self.ip, + self.ports, + self.rank, + self.rank_generation, + ) + + def assign_rank_generation( + self, + rank: int, + rank_map: Dict[int, Dict[int, Optional[str]]] + ) -> 'DaemonPlacement': + if rank not in rank_map: + rank_map[rank] = {} + gen = 0 + else: + gen = max(rank_map[rank].keys()) + 1 + rank_map[rank][gen] = None + return DaemonPlacement( + self.daemon_type, + self.hostname, + self.network, + self.name, + self.ip, + self.ports, + rank, + gen, + ) + + def matches_daemon(self, dd: DaemonDescription) -> bool: + if self.daemon_type != dd.daemon_type: + return False + if self.hostname != dd.hostname: + return False + # fixme: how to match against network? + if self.name and self.name != dd.daemon_id: + return False + if self.ports: + if self.ports != dd.ports and dd.ports: + return False + if self.ip != dd.ip and dd.ip: + return False + return True + + def matches_rank_map( + self, + dd: DaemonDescription, + rank_map: Optional[Dict[int, Dict[int, Optional[str]]]], + ranks: List[int] + ) -> bool: + if rank_map is None: + # daemon should have no rank + return dd.rank is None + + if dd.rank is None: + return False + + if dd.rank not in rank_map: + return False + if dd.rank not in ranks: + return False + + # must be the highest/newest rank_generation + if dd.rank_generation != max(rank_map[dd.rank].keys()): + return False + + # must be *this* daemon + return rank_map[dd.rank][dd.rank_generation] == dd.daemon_id + + +class HostAssignment(object): + + def __init__(self, + spec: ServiceSpec, + hosts: List[orchestrator.HostSpec], + unreachable_hosts: List[orchestrator.HostSpec], + draining_hosts: List[orchestrator.HostSpec], + daemons: List[orchestrator.DaemonDescription], + related_service_daemons: Optional[List[DaemonDescription]] = None, + networks: Dict[str, Dict[str, Dict[str, List[str]]]] = {}, + filter_new_host: Optional[Callable[[str, ServiceSpec], bool]] = None, + allow_colo: bool = False, + primary_daemon_type: Optional[str] = None, + per_host_daemon_type: Optional[str] = None, + rank_map: Optional[Dict[int, Dict[int, Optional[str]]]] = None, + ): + assert spec + self.spec = spec # type: ServiceSpec + self.primary_daemon_type = primary_daemon_type or spec.service_type + self.hosts: List[orchestrator.HostSpec] = hosts + self.unreachable_hosts: List[orchestrator.HostSpec] = unreachable_hosts + self.draining_hosts: List[orchestrator.HostSpec] = draining_hosts + self.filter_new_host = filter_new_host + self.service_name = spec.service_name() + self.daemons = daemons + self.related_service_daemons = related_service_daemons + self.networks = networks + self.allow_colo = allow_colo + self.per_host_daemon_type = per_host_daemon_type + self.ports_start = spec.get_port_start() + self.rank_map = rank_map + + def hosts_by_label(self, label: str) -> List[orchestrator.HostSpec]: + return [h for h in self.hosts if label in h.labels] + + def get_hostnames(self) -> List[str]: + return [h.hostname for h in self.hosts] + + def 
validate(self) -> None: + self.spec.validate() + + if self.spec.placement.count == 0: + raise OrchestratorValidationError( + f'<count> can not be 0 for {self.spec.one_line_str()}') + + if ( + self.spec.placement.count_per_host is not None + and self.spec.placement.count_per_host > 1 + and not self.allow_colo + ): + raise OrchestratorValidationError( + f'Cannot place more than one {self.spec.service_type} per host' + ) + + if self.spec.placement.hosts: + explicit_hostnames = {h.hostname for h in self.spec.placement.hosts} + known_hosts = self.get_hostnames() + [h.hostname for h in self.draining_hosts] + unknown_hosts = explicit_hostnames.difference(set(known_hosts)) + if unknown_hosts: + raise OrchestratorValidationError( + f'Cannot place {self.spec.one_line_str()} on {", ".join(sorted(unknown_hosts))}: Unknown hosts') + + if self.spec.placement.host_pattern: + pattern_hostnames = self.spec.placement.filter_matching_hostspecs(self.hosts) + if not pattern_hostnames: + raise OrchestratorValidationError( + f'Cannot place {self.spec.one_line_str()}: No matching hosts') + + if self.spec.placement.label: + label_hosts = self.hosts_by_label(self.spec.placement.label) + if not label_hosts: + raise OrchestratorValidationError( + f'Cannot place {self.spec.one_line_str()}: No matching ' + f'hosts for label {self.spec.placement.label}') + + def place_per_host_daemons( + self, + slots: List[DaemonPlacement], + to_add: List[DaemonPlacement], + to_remove: List[orchestrator.DaemonDescription], + ) -> Tuple[List[DaemonPlacement], List[DaemonPlacement], List[orchestrator.DaemonDescription]]: + if self.per_host_daemon_type: + host_slots = [ + DaemonPlacement(daemon_type=self.per_host_daemon_type, + hostname=hostname) + for hostname in set([s.hostname for s in slots]) + ] + existing = [ + d for d in self.daemons if d.daemon_type == self.per_host_daemon_type + ] + slots += host_slots + for dd in existing: + found = False + for p in host_slots: + if p.matches_daemon(dd): + host_slots.remove(p) + found = True + break + if not found: + to_remove.append(dd) + to_add += host_slots + + to_remove = [d for d in to_remove if d.hostname not in [ + h.hostname for h in self.unreachable_hosts]] + + return slots, to_add, to_remove + + def place(self): + # type: () -> Tuple[List[DaemonPlacement], List[DaemonPlacement], List[orchestrator.DaemonDescription]] + """ + Generate a list of DaemonPlacement taking into account: + + * all known hosts + * hosts with existing daemons + * placement spec + * self.filter_new_host + """ + + self.validate() + + count = self.spec.placement.count + + # get candidate hosts based on [hosts, label, host_pattern] + candidates = self.get_candidates() # type: List[DaemonPlacement] + if self.primary_daemon_type in RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES: + # remove unreachable hosts that are not in maintenance so daemons + # on these hosts will be rescheduled + candidates = self.remove_non_maintenance_unreachable_candidates(candidates) + + def expand_candidates(ls: List[DaemonPlacement], num: int) -> List[DaemonPlacement]: + r = [] + for offset in range(num): + r.extend([dp.renumber_ports(offset) for dp in ls]) + return r + + # consider enough slots to fulfill target count-per-host or count + if count is None: + if self.spec.placement.count_per_host: + per_host = self.spec.placement.count_per_host + else: + per_host = 1 + candidates = expand_candidates(candidates, per_host) + elif self.allow_colo and candidates: + per_host = 1 + ((count - 1) // len(candidates)) + candidates = expand_candidates(candidates,
per_host) + + # consider (preserve) existing daemons in a particular order... + daemons = sorted( + [ + d for d in self.daemons if d.daemon_type == self.primary_daemon_type + ], + key=lambda d: ( + not d.is_active, # active before standby + d.rank is not None, # ranked first, then non-ranked + d.rank, # low ranks + 0 - (d.rank_generation or 0), # newer generations first + ) + ) + + # sort candidates into existing/used slots that already have a + # daemon, and others (the rest) + existing_active: List[orchestrator.DaemonDescription] = [] + existing_standby: List[orchestrator.DaemonDescription] = [] + existing_slots: List[DaemonPlacement] = [] + to_add: List[DaemonPlacement] = [] + to_remove: List[orchestrator.DaemonDescription] = [] + ranks: List[int] = list(range(len(candidates))) + others: List[DaemonPlacement] = candidates.copy() + for dd in daemons: + found = False + for p in others: + if p.matches_daemon(dd) and p.matches_rank_map(dd, self.rank_map, ranks): + others.remove(p) + if dd.is_active: + existing_active.append(dd) + else: + existing_standby.append(dd) + if dd.rank is not None: + assert dd.rank_generation is not None + p = p.assign_rank(dd.rank, dd.rank_generation) + ranks.remove(dd.rank) + existing_slots.append(p) + found = True + break + if not found: + to_remove.append(dd) + + # TODO: At some point we want to deploy daemons that are on offline hosts + # at what point we do this differs per daemon type. Stateless daemons we could + # do quickly to improve availability. Stateful daemons we might want to wait longer + # to see if the host comes back online + + existing = existing_active + existing_standby + + # build to_add + if not count: + to_add = [dd for dd in others if dd.hostname not in [ + h.hostname for h in self.unreachable_hosts]] + else: + # The number of new slots that need to be selected in order to fulfill count + need = count - len(existing) + + # we don't need any additional placements + if need <= 0: + to_remove.extend(existing[count:]) + del existing_slots[count:] + return self.place_per_host_daemons(existing_slots, [], to_remove) + + if self.related_service_daemons: + # prefer to put daemons on the same host(s) as daemons of the related service + # Note that we are only doing this over picking arbitrary hosts to satisfy + # the count. We are not breaking any deterministic placements in order to + # match the placement with a related service. + related_service_hosts = list(set(dd.hostname for dd in self.related_service_daemons)) + matching_dps = [dp for dp in others if dp.hostname in related_service_hosts] + for dp in matching_dps: + if need <= 0: + break + if dp.hostname in related_service_hosts and dp.hostname not in [h.hostname for h in self.unreachable_hosts]: + logger.debug(f'Preferring {dp.hostname} for service {self.service_name} as related daemons have been placed there') + to_add.append(dp) + need -= 1 # this is last use of need so it can work as a counter + # at this point, we've either met our placement quota entirely using hosts with related + # service daemons, or we still need to place more. 
If we do need to place more, + # we should make sure not to re-use hosts with related service daemons by filtering + # them out from the "others" list + if need > 0: + others = [dp for dp in others if dp.hostname not in related_service_hosts] + + for dp in others: + if need <= 0: + break + if dp.hostname not in [h.hostname for h in self.unreachable_hosts]: + to_add.append(dp) + need -= 1 # this is last use of need in this function so it can work as a counter + + if self.rank_map is not None: + # assign unused ranks (and rank_generations) to to_add + assert len(ranks) >= len(to_add) + for i in range(len(to_add)): + to_add[i] = to_add[i].assign_rank_generation(ranks[i], self.rank_map) + + logger.debug('Combine hosts with existing daemons %s + new hosts %s' % (existing, to_add)) + return self.place_per_host_daemons(existing_slots + to_add, to_add, to_remove) + + def find_ip_on_host(self, hostname: str, subnets: List[str]) -> Optional[str]: + for subnet in subnets: + ips: List[str] = [] + # following is to allow loopback interfaces for both ipv4 and ipv6. Since we + # only have the subnet (and no IP) we assume the default loopback IP address. + if ipaddress.ip_network(subnet).is_loopback: + if ipaddress.ip_network(subnet).version == 4: + ips.append('127.0.0.1') + else: + ips.append('::1') + for iface, iface_ips in self.networks.get(hostname, {}).get(subnet, {}).items(): + ips.extend(iface_ips) + if ips: + return sorted(ips)[0] + return None + + def get_candidates(self) -> List[DaemonPlacement]: + if self.spec.placement.hosts: + ls = [ + DaemonPlacement(daemon_type=self.primary_daemon_type, + hostname=h.hostname, network=h.network, name=h.name, + ports=self.ports_start) + for h in self.spec.placement.hosts if h.hostname not in [dh.hostname for dh in self.draining_hosts] + ] + elif self.spec.placement.label: + ls = [ + DaemonPlacement(daemon_type=self.primary_daemon_type, + hostname=x.hostname, ports=self.ports_start) + for x in self.hosts_by_label(self.spec.placement.label) + ] + elif self.spec.placement.host_pattern: + ls = [ + DaemonPlacement(daemon_type=self.primary_daemon_type, + hostname=x, ports=self.ports_start) + for x in self.spec.placement.filter_matching_hostspecs(self.hosts) + ] + elif ( + self.spec.placement.count is not None + or self.spec.placement.count_per_host is not None + ): + ls = [ + DaemonPlacement(daemon_type=self.primary_daemon_type, + hostname=x.hostname, ports=self.ports_start) + for x in self.hosts + ] + else: + raise OrchestratorValidationError( + "placement spec is empty: no hosts, no label, no pattern, no count") + + # allocate an IP? + if self.spec.networks: + orig = ls.copy() + ls = [] + for p in orig: + ip = self.find_ip_on_host(p.hostname, self.spec.networks) + if ip: + ls.append(DaemonPlacement(daemon_type=self.primary_daemon_type, + hostname=p.hostname, network=p.network, + name=p.name, ports=p.ports, ip=ip)) + else: + logger.debug( + f'Skipping {p.hostname} with no IP in network(s) {self.spec.networks}' + ) + + if self.filter_new_host: + old = ls.copy() + ls = [] + for h in old: + if self.filter_new_host(h.hostname, self.spec): + ls.append(h) + if len(old) > len(ls): + logger.debug('Filtered %s down to %s' % (old, ls)) + + # now that we have the list of node candidates based on the configured + # placement, let's shuffle the list for pseudo-random node selection. For this, + # we generate a seed from the service name and use it to shuffle the candidates. + # This makes shuffling deterministic for the same service name.
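+ # e.g. (illustrative): hashlib.sha1(b'mon').hexdigest() is a fixed + # constant, so the derived 32-bit seed - and therefore the shuffled + # order of the 'mon' candidates - is identical on every call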
+ seed = int(
+ hashlib.sha1(self.spec.service_name().encode('utf-8')).hexdigest(),
+ 16
+ ) % (2 ** 32) # truncate result to 32 bits
+ final = sorted(ls)
+ random.Random(seed).shuffle(final)
+ return final
+
+ def remove_non_maintenance_unreachable_candidates(self, candidates: List[DaemonPlacement]) -> List[DaemonPlacement]:
+ in_maintenance: Dict[str, bool] = {}
+ for h in self.hosts:
+ if h.status.lower() == 'maintenance':
+ in_maintenance[h.hostname] = True
+ continue
+ in_maintenance[h.hostname] = False
+ unreachable_hosts = [h.hostname for h in self.unreachable_hosts]
+ candidates = [
+ c for c in candidates if c.hostname not in unreachable_hosts or in_maintenance[c.hostname]]
+ return candidates
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
new file mode 100644
index 000000000..5dfdc27a3
--- /dev/null
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -0,0 +1,1680 @@
+import ipaddress
+import hashlib
+import json
+import logging
+import uuid
+import os
+from collections import defaultdict
+from typing import TYPE_CHECKING, Optional, List, cast, Dict, Any, Union, Tuple, Set, \
+ DefaultDict, Callable
+
+from ceph.deployment import inventory
+from ceph.deployment.drive_group import DriveGroupSpec
+from ceph.deployment.service_spec import (
+ ArgumentList,
+ ArgumentSpec,
+ CustomContainerSpec,
+ PlacementSpec,
+ RGWSpec,
+ ServiceSpec,
+ IngressSpec,
+)
+from ceph.utils import datetime_now
+
+import orchestrator
+from orchestrator import OrchestratorError, set_exception_subject, OrchestratorEvent, \
+ DaemonDescriptionStatus, daemon_type_to_service
+from cephadm.services.cephadmservice import CephadmDaemonDeploySpec
+from cephadm.schedule import HostAssignment
+from cephadm.autotune import MemoryAutotuner
+from cephadm.utils import forall_hosts, cephadmNoImage, is_repo_digest, \
+ CephadmNoImage, CEPH_TYPES, ContainerInspectInfo, SpecialHostLabels
+from mgr_module import MonCommandFailed
+from mgr_util import format_bytes, verify_tls, get_cert_issuer_info, ServerConfigException
+
+from . import utils
+from . import exchange
+
+if TYPE_CHECKING:
+ from cephadm.module import CephadmOrchestrator
+
+logger = logging.getLogger(__name__)
+
+REQUIRES_POST_ACTIONS = ['grafana', 'iscsi', 'prometheus', 'alertmanager', 'rgw']
+
+
+class CephadmServe:
+ """
+ This module contains functions that are executed in the
+ serve() thread. Thus they don't block the CLI.
+
+ Please see the `Note regarding network calls from CLI handlers`
+ chapter in the cephadm developer guide.
+
+ On the other hand, these functions should *not* be called from
+ CLI handlers, to avoid blocking the CLI.
+ """
+
+ def __init__(self, mgr: "CephadmOrchestrator"):
+ self.mgr: "CephadmOrchestrator" = mgr
+ self.log = logger
+
+ def serve(self) -> None:
+ """
+ The main loop of cephadm.
+
+ A command handler will typically change the declarative state
+ of cephadm. This loop will then attempt to apply this new state.
+ """ + self.log.debug("serve starting") + self.mgr.config_checker.load_network_config() + + while self.mgr.run: + self.log.debug("serve loop start") + + try: + + self.convert_tags_to_repo_digest() + + # refresh daemons + self.log.debug('refreshing hosts and daemons') + self._refresh_hosts_and_daemons() + + self._check_for_strays() + + self._update_paused_health() + + if self.mgr.need_connect_dashboard_rgw and self.mgr.config_dashboard: + self.mgr.need_connect_dashboard_rgw = False + if 'dashboard' in self.mgr.get('mgr_map')['modules']: + self.log.info('Checking dashboard <-> RGW credentials') + self.mgr.remote('dashboard', 'set_rgw_credentials') + + if not self.mgr.paused: + self._run_async_actions() + + self.mgr.to_remove_osds.process_removal_queue() + + self.mgr.migration.migrate() + if self.mgr.migration.is_migration_ongoing(): + continue + + if self._apply_all_services(): + continue # did something, refresh + + self._check_daemons() + + self._check_certificates() + + self._purge_deleted_services() + + self._check_for_moved_osds() + + if self.mgr.agent_helpers._handle_use_agent_setting(): + continue + + if self.mgr.upgrade.continue_upgrade(): + continue + + except OrchestratorError as e: + if e.event_subject: + self.mgr.events.from_orch_error(e) + + self.log.debug("serve loop sleep") + self._serve_sleep() + self.log.debug("serve loop wake") + self.log.debug("serve exit") + + def _check_certificates(self) -> None: + for d in self.mgr.cache.get_daemons_by_type('grafana'): + cert = self.mgr.get_store(f'{d.hostname}/grafana_crt') + key = self.mgr.get_store(f'{d.hostname}/grafana_key') + if (not cert or not cert.strip()) and (not key or not key.strip()): + # certificate/key are empty... nothing to check + return + + try: + get_cert_issuer_info(cert) + verify_tls(cert, key) + self.mgr.remove_health_warning('CEPHADM_CERT_ERROR') + except ServerConfigException as e: + err_msg = f""" + Detected invalid grafana certificates. Please, use the following commands: + + > ceph config-key set mgr/cephadm/{d.hostname}/grafana_crt -i + > ceph config-key set mgr/cephadm/{d.hostname}/grafana_key -i + + to set valid key and certificate or reset their value to an empty string + in case you want cephadm to generate self-signed Grafana certificates. 
+ + Once done, run the following command to reconfig the daemon: + + > ceph orch daemon reconfig grafana.{d.hostname} + + """ + self.log.error(f'Detected invalid grafana certificate on host {d.hostname}: {e}') + self.mgr.set_health_warning('CEPHADM_CERT_ERROR', + f'Invalid grafana certificate on host {d.hostname}: {e}', + 1, [err_msg]) + break + + def _serve_sleep(self) -> None: + sleep_interval = max( + 30, + min( + self.mgr.host_check_interval, + self.mgr.facts_cache_timeout, + self.mgr.daemon_cache_timeout, + self.mgr.device_cache_timeout, + ) + ) + self.log.debug('Sleeping for %d seconds', sleep_interval) + self.mgr.event.wait(sleep_interval) + self.mgr.event.clear() + + def _update_paused_health(self) -> None: + self.log.debug('_update_paused_health') + if self.mgr.paused: + self.mgr.set_health_warning('CEPHADM_PAUSED', 'cephadm background work is paused', 1, [ + "'ceph orch resume' to resume"]) + else: + self.mgr.remove_health_warning('CEPHADM_PAUSED') + + def _autotune_host_memory(self, host: str) -> None: + total_mem = self.mgr.cache.get_facts(host).get('memory_total_kb', 0) + if not total_mem: + val = None + else: + total_mem *= 1024 # kb -> bytes + total_mem *= self.mgr.autotune_memory_target_ratio + a = MemoryAutotuner( + daemons=self.mgr.cache.get_daemons_by_host(host), + config_get=self.mgr.get_foreign_ceph_option, + total_mem=total_mem, + ) + val, osds = a.tune() + any_changed = False + for o in osds: + if self.mgr.get_foreign_ceph_option(o, 'osd_memory_target') != val: + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': o, + 'name': 'osd_memory_target', + }) + any_changed = True + if val is not None: + if any_changed: + self.mgr.log.info( + f'Adjusting osd_memory_target on {host} to {format_bytes(val, 6)}' + ) + ret, out, err = self.mgr.mon_command({ + 'prefix': 'config set', + 'who': f'osd/host:{host.split(".")[0]}', + 'name': 'osd_memory_target', + 'value': str(val), + }) + if ret: + self.log.warning( + f'Unable to set osd_memory_target on {host} to {val}: {err}' + ) + else: + # if osd memory autotuning is off, we don't want to remove these config + # options as users may be using them. Since there is no way to set autotuning + # on/off at a host level, best we can do is check if it is globally on. 
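As an aside on the sizing math in _autotune_host_memory above (the branch that follows handles the autotune-off cleanup case): a hedged sketch of a per-OSD memory target under the ratio applied there. The even split across OSDs is an assumption for illustration; the real MemoryAutotuner also budgets for colocated non-OSD daemons.

    def autotuned_osd_target(memory_total_kb: int, ratio: float, num_osds: int) -> int:
        # kb -> bytes, then apply autotune_memory_target_ratio, as in the code above
        budget = int(memory_total_kb * 1024 * ratio)
        # assumption: split evenly across OSDs; the real autotuner first
        # subtracts fixed budgets for the other daemons on the host
        return budget // max(num_osds, 1)

    # e.g. a 64 GiB host with ratio 0.7 and 8 OSDs -> roughly 5.6 GiB per OSD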
+ if self.mgr.get_foreign_ceph_option('osd', 'osd_memory_target_autotune'): + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': f'osd/host:{host.split(".")[0]}', + 'name': 'osd_memory_target', + }) + self.mgr.cache.update_autotune(host) + + def _refresh_hosts_and_daemons(self) -> None: + self.log.debug('_refresh_hosts_and_daemons') + bad_hosts = [] + failures = [] + agents_down: List[str] = [] + + @forall_hosts + def refresh(host: str) -> None: + + # skip hosts that are in maintenance - they could be powered off + if self.mgr.inventory._inventory[host].get("status", "").lower() == "maintenance": + return + + if self.mgr.use_agent: + if self.mgr.agent_helpers._check_agent(host): + agents_down.append(host) + + if self.mgr.cache.host_needs_check(host): + r = self._check_host(host) + if r is not None: + bad_hosts.append(r) + + if ( + not self.mgr.use_agent + or self.mgr.cache.is_host_draining(host) + or host in agents_down + ): + if self.mgr.cache.host_needs_daemon_refresh(host): + self.log.debug('refreshing %s daemons' % host) + r = self._refresh_host_daemons(host) + if r: + failures.append(r) + + if self.mgr.cache.host_needs_facts_refresh(host): + self.log.debug(('Refreshing %s facts' % host)) + r = self._refresh_facts(host) + if r: + failures.append(r) + + if self.mgr.cache.host_needs_network_refresh(host): + self.log.debug(('Refreshing %s networks' % host)) + r = self._refresh_host_networks(host) + if r: + failures.append(r) + + if self.mgr.cache.host_needs_device_refresh(host): + self.log.debug('refreshing %s devices' % host) + r = self._refresh_host_devices(host) + if r: + failures.append(r) + self.mgr.cache.metadata_up_to_date[host] = True + elif not self.mgr.cache.get_daemons_by_type('agent', host=host): + if self.mgr.cache.host_needs_daemon_refresh(host): + self.log.debug('refreshing %s daemons' % host) + r = self._refresh_host_daemons(host) + if r: + failures.append(r) + self.mgr.cache.metadata_up_to_date[host] = True + + if self.mgr.cache.host_needs_registry_login(host) and self.mgr.get_store('registry_credentials'): + self.log.debug(f"Logging `{host}` into custom registry") + with self.mgr.async_timeout_handler(host, 'cephadm registry-login'): + r = self.mgr.wait_async(self._registry_login( + host, json.loads(str(self.mgr.get_store('registry_credentials'))))) + if r: + bad_hosts.append(r) + + if self.mgr.cache.host_needs_osdspec_preview_refresh(host): + self.log.debug(f"refreshing OSDSpec previews for {host}") + r = self._refresh_host_osdspec_previews(host) + if r: + failures.append(r) + + if ( + self.mgr.cache.host_needs_autotune_memory(host) + and not self.mgr.inventory.has_label(host, SpecialHostLabels.NO_MEMORY_AUTOTUNE) + ): + self.log.debug(f"autotuning memory for {host}") + self._autotune_host_memory(host) + + refresh(self.mgr.cache.get_hosts()) + + self._write_all_client_files() + + self.mgr.agent_helpers._update_agent_down_healthcheck(agents_down) + self.mgr.http_server.config_update() + + self.mgr.config_checker.run_checks() + + for k in [ + 'CEPHADM_HOST_CHECK_FAILED', + 'CEPHADM_REFRESH_FAILED', + ]: + self.mgr.remove_health_warning(k) + if bad_hosts: + self.mgr.set_health_warning( + 'CEPHADM_HOST_CHECK_FAILED', f'{len(bad_hosts)} hosts fail cephadm check', len(bad_hosts), bad_hosts) + if failures: + self.mgr.set_health_warning( + 'CEPHADM_REFRESH_FAILED', 'failed to probe daemons or devices', len(failures), failures) + self.mgr.update_failed_daemon_health_check() + + def _check_host(self, host: str) -> Optional[str]: + if host not in self.mgr.inventory: + 
return None + self.log.debug(' checking %s' % host) + try: + addr = self.mgr.inventory.get_addr(host) if host in self.mgr.inventory else host + with self.mgr.async_timeout_handler(host, 'cephadm check-host'): + out, err, code = self.mgr.wait_async(self._run_cephadm( + host, cephadmNoImage, 'check-host', [], + error_ok=True, no_fsid=True, log_output=self.mgr.log_refresh_metadata)) + self.mgr.cache.update_last_host_check(host) + self.mgr.cache.save_host(host) + if code: + self.log.debug(' host %s (%s) failed check' % (host, addr)) + if self.mgr.warn_on_failed_host_check: + return 'host %s (%s) failed check: %s' % (host, addr, err) + else: + self.log.debug(' host %s (%s) ok' % (host, addr)) + except Exception as e: + self.log.debug(' host %s (%s) failed check' % (host, addr)) + return 'host %s (%s) failed check: %s' % (host, addr, e) + return None + + def _refresh_host_daemons(self, host: str) -> Optional[str]: + try: + with self.mgr.async_timeout_handler(host, 'cephadm ls'): + ls = self.mgr.wait_async(self._run_cephadm_json( + host, 'mon', 'ls', [], no_fsid=True, log_output=self.mgr.log_refresh_metadata)) + except OrchestratorError as e: + return str(e) + self.mgr._process_ls_output(host, ls) + return None + + def _refresh_facts(self, host: str) -> Optional[str]: + try: + with self.mgr.async_timeout_handler(host, 'cephadm gather-facts'): + val = self.mgr.wait_async(self._run_cephadm_json( + host, cephadmNoImage, 'gather-facts', [], + no_fsid=True, log_output=self.mgr.log_refresh_metadata)) + except OrchestratorError as e: + return str(e) + + self.mgr.cache.update_host_facts(host, val) + + return None + + def _refresh_host_devices(self, host: str) -> Optional[str]: + with_lsm = self.mgr.device_enhanced_scan + list_all = self.mgr.inventory_list_all + inventory_args = ['--', 'inventory', + '--format=json-pretty', + '--filter-for-batch'] + if with_lsm: + inventory_args.insert(-1, "--with-lsm") + if list_all: + inventory_args.insert(-1, "--list-all") + + try: + try: + with self.mgr.async_timeout_handler(host, 'cephadm ceph-volume -- inventory'): + devices = self.mgr.wait_async(self._run_cephadm_json( + host, 'osd', 'ceph-volume', inventory_args, log_output=self.mgr.log_refresh_metadata)) + except OrchestratorError as e: + if 'unrecognized arguments: --filter-for-batch' in str(e): + rerun_args = inventory_args.copy() + rerun_args.remove('--filter-for-batch') + with self.mgr.async_timeout_handler(host, 'cephadm ceph-volume -- inventory'): + devices = self.mgr.wait_async(self._run_cephadm_json( + host, 'osd', 'ceph-volume', rerun_args, log_output=self.mgr.log_refresh_metadata)) + else: + raise + + except OrchestratorError as e: + return str(e) + + self.log.debug('Refreshed host %s devices (%d)' % ( + host, len(devices))) + ret = inventory.Devices.from_json(devices) + self.mgr.cache.update_host_devices(host, ret.devices) + self.update_osdspec_previews(host) + self.mgr.cache.save_host(host) + return None + + def _refresh_host_networks(self, host: str) -> Optional[str]: + try: + with self.mgr.async_timeout_handler(host, 'cephadm list-networks'): + networks = self.mgr.wait_async(self._run_cephadm_json( + host, 'mon', 'list-networks', [], no_fsid=True, log_output=self.mgr.log_refresh_metadata)) + except OrchestratorError as e: + return str(e) + + self.log.debug('Refreshed host %s networks (%s)' % ( + host, len(networks))) + self.mgr.cache.update_host_networks(host, networks) + self.mgr.cache.save_host(host) + return None + + def _refresh_host_osdspec_previews(self, host: str) -> Optional[str]: + 
self.update_osdspec_previews(host) + self.mgr.cache.save_host(host) + self.log.debug(f'Refreshed OSDSpec previews for host <{host}>') + return None + + def update_osdspec_previews(self, search_host: str = '') -> None: + # Set global 'pending' flag for host + self.mgr.cache.loading_osdspec_preview.add(search_host) + previews = [] + # query OSDSpecs for host and generate/get the preview + # There can be multiple previews for one host due to multiple OSDSpecs. + previews.extend(self.mgr.osd_service.get_previews(search_host)) + self.log.debug(f'Loading OSDSpec previews to HostCache for host <{search_host}>') + self.mgr.cache.osdspec_previews[search_host] = previews + # Unset global 'pending' flag for host + self.mgr.cache.loading_osdspec_preview.remove(search_host) + + def _run_async_actions(self) -> None: + while self.mgr.scheduled_async_actions: + (self.mgr.scheduled_async_actions.pop(0))() + + def _check_for_strays(self) -> None: + self.log.debug('_check_for_strays') + for k in ['CEPHADM_STRAY_HOST', + 'CEPHADM_STRAY_DAEMON']: + self.mgr.remove_health_warning(k) + if self.mgr.warn_on_stray_hosts or self.mgr.warn_on_stray_daemons: + ls = self.mgr.list_servers() + self.log.debug(ls) + managed = self.mgr.cache.get_daemon_names() + host_detail = [] # type: List[str] + host_num_daemons = 0 + daemon_detail = [] # type: List[str] + for item in ls: + host = item.get('hostname') + assert isinstance(host, str) + daemons = item.get('services') # misnomer! + assert isinstance(daemons, list) + missing_names = [] + for s in daemons: + daemon_id = s.get('id') + assert daemon_id + name = '%s.%s' % (s.get('type'), daemon_id) + if s.get('type') in ['rbd-mirror', 'cephfs-mirror', 'rgw', 'rgw-nfs']: + metadata = self.mgr.get_metadata( + cast(str, s.get('type')), daemon_id, {}) + assert metadata is not None + try: + if s.get('type') == 'rgw-nfs': + # https://tracker.ceph.com/issues/49573 + name = metadata['id'][:-4] + else: + name = '%s.%s' % (s.get('type'), metadata['id']) + except (KeyError, TypeError): + self.log.debug( + "Failed to find daemon id for %s service %s" % ( + s.get('type'), s.get('id') + ) + ) + if s.get('type') == 'tcmu-runner': + # because we don't track tcmu-runner daemons in the host cache + # and don't have a way to check if the daemon is part of iscsi service + # we assume that all tcmu-runner daemons are managed by cephadm + managed.append(name) + if host not in self.mgr.inventory: + missing_names.append(name) + host_num_daemons += 1 + if name not in managed: + daemon_detail.append( + 'stray daemon %s on host %s not managed by cephadm' % (name, host)) + if missing_names: + host_detail.append( + 'stray host %s has %d stray daemons: %s' % ( + host, len(missing_names), missing_names)) + if self.mgr.warn_on_stray_hosts and host_detail: + self.mgr.set_health_warning( + 'CEPHADM_STRAY_HOST', f'{len(host_detail)} stray host(s) with {host_num_daemons} daemon(s) not managed by cephadm', len(host_detail), host_detail) + if self.mgr.warn_on_stray_daemons and daemon_detail: + self.mgr.set_health_warning( + 'CEPHADM_STRAY_DAEMON', f'{len(daemon_detail)} stray daemon(s) not managed by cephadm', len(daemon_detail), daemon_detail) + + def _check_for_moved_osds(self) -> None: + self.log.debug('_check_for_moved_osds') + all_osds: DefaultDict[int, List[orchestrator.DaemonDescription]] = defaultdict(list) + for dd in self.mgr.cache.get_daemons_by_type('osd'): + assert dd.daemon_id + all_osds[int(dd.daemon_id)].append(dd) + for osd_id, dds in all_osds.items(): + if len(dds) <= 1: + continue + running = [dd 
for dd in dds if dd.status == DaemonDescriptionStatus.running]
+ error = [dd for dd in dds if dd.status == DaemonDescriptionStatus.error]
+ msg = f'Found duplicate OSDs: {", ".join(str(dd) for dd in dds)}'
+ logger.info(msg)
+ if len(running) != 1:
+ continue
+ osd = self.mgr.get_osd_by_id(osd_id)
+ if not osd or not osd['up']:
+ continue
+ for e in error:
+ assert e.hostname
+ try:
+ self._remove_daemon(e.name(), e.hostname, no_post_remove=True)
+ self.mgr.events.for_daemon(
+ e.name(), 'INFO', f"Removed duplicated daemon on host '{e.hostname}'")
+ except OrchestratorError as ex:
+ self.mgr.events.from_orch_error(ex)
+ logger.exception(f'failed to remove duplicated daemon {e}')
+
+ def _apply_all_services(self) -> bool:
+ self.log.debug('_apply_all_services')
+ r = False
+ specs = [] # type: List[ServiceSpec]
+ # if metadata is not up to date, we still need to apply the spec for the agent,
+ # since the agent is the one that gathers the metadata. If we don't, we
+ # end up stuck between wanting metadata to be up to date to apply specs
+ # and needing to apply the agent spec to get up-to-date metadata
+ if self.mgr.use_agent and not self.mgr.cache.all_host_metadata_up_to_date():
+ self.log.info('Metadata not up to date on all hosts. Skipping non-agent specs')
+ try:
+ specs.append(self.mgr.spec_store['agent'].spec)
+ except Exception as e:
+ self.log.debug(f'Failed to find agent spec: {e}')
+ self.mgr.agent_helpers._apply_agent()
+ return r
+ else:
+ _specs: List[ServiceSpec] = []
+ for sn, spec in self.mgr.spec_store.active_specs.items():
+ _specs.append(spec)
+ # apply specs that don't use count first, since their placement is deterministic
+ # and not dependent on other daemons' placements in any way
+ specs = [s for s in _specs if not s.placement.count] + [s for s in _specs if s.placement.count]
+
+ for name in ['CEPHADM_APPLY_SPEC_FAIL', 'CEPHADM_DAEMON_PLACE_FAIL']:
+ self.mgr.remove_health_warning(name)
+ self.mgr.apply_spec_fails = []
+ for spec in specs:
+ try:
+ if self._apply_service(spec):
+ r = True
+ except Exception as e:
+ msg = f'Failed to apply {spec.service_name()} spec {spec}: {str(e)}'
+ self.log.exception(msg)
+ self.mgr.events.for_service(spec, 'ERROR', 'Failed to apply: ' + str(e))
+ self.mgr.apply_spec_fails.append((spec.service_name(), str(e)))
+ warnings = []
+ for x in self.mgr.apply_spec_fails:
+ warnings.append(f'{x[0]}: {x[1]}')
+ self.mgr.set_health_warning('CEPHADM_APPLY_SPEC_FAIL',
+ f"Failed to apply {len(self.mgr.apply_spec_fails)} service(s): {','.join(x[0] for x in self.mgr.apply_spec_fails)}",
+ len(self.mgr.apply_spec_fails),
+ warnings)
+ self.mgr.update_watched_hosts()
+ self.mgr.tuned_profile_utils._write_all_tuned_profiles()
+ return r
+
+ def _apply_service_config(self, spec: ServiceSpec) -> None:
+ if spec.config:
+ section = utils.name_to_config_section(spec.service_name())
+ for name in ['CEPHADM_INVALID_CONFIG_OPTION', 'CEPHADM_FAILED_SET_OPTION']:
+ self.mgr.remove_health_warning(name)
+ invalid_config_options = []
+ options_failed_to_set = []
+ for k, v in spec.config.items():
+ try:
+ current = self.mgr.get_foreign_ceph_option(section, k)
+ except KeyError:
+ msg = f'Ignoring invalid {spec.service_name()} config option {k}'
+ self.log.warning(msg)
+ self.mgr.events.for_service(
+ spec, OrchestratorEvent.ERROR, f'Invalid config option {k}'
+ )
+ invalid_config_options.append(msg)
+ continue
+ if current != v:
+ self.log.debug(f'setting [{section}] {k} = {v}')
+ try:
+ self.mgr.check_mon_command({
+ 'prefix': 'config set',
+ 'name': k,
+ 'value': str(v),
'who': section,
+ })
+ except MonCommandFailed as e:
+ msg = f'Failed to set {spec.service_name()} option {k}: {e}'
+ self.log.warning(msg)
+ options_failed_to_set.append(msg)
+
+ if invalid_config_options:
+ self.mgr.set_health_warning('CEPHADM_INVALID_CONFIG_OPTION', f'Ignoring {len(invalid_config_options)} invalid config option(s)', len(
+ invalid_config_options), invalid_config_options)
+ if options_failed_to_set:
+ self.mgr.set_health_warning('CEPHADM_FAILED_SET_OPTION', f'Failed to set {len(options_failed_to_set)} option(s)', len(
+ options_failed_to_set), options_failed_to_set)
+
+ def _update_rgw_endpoints(self, rgw_spec: RGWSpec) -> None:
+
+ if not rgw_spec.update_endpoints or rgw_spec.rgw_realm_token is None:
+ return
+
+ ep = []
+ protocol = 'https' if rgw_spec.ssl else 'http'
+ for s in self.mgr.cache.get_daemons_by_service(rgw_spec.service_name()):
+ if s.ports:
+ for p in s.ports:
+ ep.append(f'{protocol}://{s.hostname}:{p}')
+ zone_update_cmd = {
+ 'prefix': 'rgw zone modify',
+ 'realm_name': rgw_spec.rgw_realm,
+ 'zonegroup_name': rgw_spec.rgw_zonegroup,
+ 'zone_name': rgw_spec.rgw_zone,
+ 'realm_token': rgw_spec.rgw_realm_token,
+ 'zone_endpoints': ep,
+ }
+ self.log.debug(f'rgw cmd: {zone_update_cmd}')
+ rc, out, err = self.mgr.mon_command(zone_update_cmd)
+ rgw_spec.update_endpoints = (rc != 0) # keep trying on failure
+ if rc != 0:
+ self.log.error(f'Error when trying to update rgw zone: {err}')
+ self.mgr.set_health_warning('CEPHADM_RGW', f'Cannot update rgw endpoints, error: {err}', 1,
+ [f'Cannot update rgw endpoints for daemon {rgw_spec.service_name()}, error: {err}'])
+ else:
+ self.mgr.remove_health_warning('CEPHADM_RGW')
+
+ def _apply_service(self, spec: ServiceSpec) -> bool:
+ """
+ Schedule a service. Deploy new daemons or remove old ones, depending
+ on the target label and count specified in the placement.
+ """ + self.mgr.migration.verify_no_migration() + + service_type = spec.service_type + service_name = spec.service_name() + if spec.unmanaged: + self.log.debug('Skipping unmanaged service %s' % service_name) + return False + if spec.preview_only: + self.log.debug('Skipping preview_only service %s' % service_name) + return False + self.log.debug('Applying service %s spec' % service_name) + + if service_type == 'agent': + try: + assert self.mgr.http_server.agent + assert self.mgr.http_server.agent.ssl_certs.get_root_cert() + except Exception: + self.log.info( + 'Delaying applying agent spec until cephadm endpoint root cert created') + return False + + self._apply_service_config(spec) + + if service_type == 'osd': + self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) + # TODO: return True would result in a busy loop + # can't know if daemon count changed; create_from_spec doesn't + # return a solid indication + return False + + svc = self.mgr.cephadm_services[service_type] + daemons = self.mgr.cache.get_daemons_by_service(service_name) + related_service_daemons = self.mgr.cache.get_related_service_daemons(spec) + + public_networks: List[str] = [] + if service_type == 'mon': + out = str(self.mgr.get_foreign_ceph_option('mon', 'public_network')) + if '/' in out: + public_networks = [x.strip() for x in out.split(',')] + self.log.debug('mon public_network(s) is %s' % public_networks) + + def matches_public_network(host: str, sspec: ServiceSpec) -> bool: + # make sure the host has at least one network that belongs to some configured public network(s) + for pn in public_networks: + public_network = ipaddress.ip_network(pn) + for hn in self.mgr.cache.networks[host]: + host_network = ipaddress.ip_network(hn) + if host_network.overlaps(public_network): + return True + + host_networks = ','.join(self.mgr.cache.networks[host]) + pub_networks = ','.join(public_networks) + self.log.info( + f"Filtered out host {host}: does not belong to mon public_network(s): " + f" {pub_networks}, host network(s): {host_networks}" + ) + return False + + def has_interface_for_vip(host: str, sspec: ServiceSpec) -> bool: + # make sure the host has an interface that can + # actually accomodate the VIP + if not sspec or sspec.service_type != 'ingress': + return True + ingress_spec = cast(IngressSpec, sspec) + virtual_ips = [] + if ingress_spec.virtual_ip: + virtual_ips.append(ingress_spec.virtual_ip) + elif ingress_spec.virtual_ips_list: + virtual_ips = ingress_spec.virtual_ips_list + for vip in virtual_ips: + found = False + bare_ip = str(vip).split('/')[0] + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): + # found matching interface for this IP, move on + self.log.debug( + f'{bare_ip} is in {subnet} on {host} interface {list(ifaces.keys())[0]}' + ) + found = True + break + if not found: + self.log.info( + f"Filtered out host {host}: Host has no interface available for VIP: {vip}" + ) + return False + return True + + host_filters: Dict[str, Callable[[str, ServiceSpec], bool]] = { + 'mon': matches_public_network, + 'ingress': has_interface_for_vip + } + + rank_map = None + if svc.ranked(): + rank_map = self.mgr.spec_store[spec.service_name()].rank_map or {} + ha = HostAssignment( + spec=spec, + hosts=self.mgr.cache.get_non_draining_hosts() if spec.service_name( + ) == 'agent' else self.mgr.cache.get_schedulable_hosts(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + 
draining_hosts=self.mgr.cache.get_draining_hosts(), + daemons=daemons, + related_service_daemons=related_service_daemons, + networks=self.mgr.cache.networks, + filter_new_host=host_filters.get(service_type, None), + allow_colo=svc.allow_colo(), + primary_daemon_type=svc.primary_daemon_type(spec), + per_host_daemon_type=svc.per_host_daemon_type(spec), + rank_map=rank_map, + ) + + try: + all_slots, slots_to_add, daemons_to_remove = ha.place() + daemons_to_remove = [d for d in daemons_to_remove if (d.hostname and self.mgr.inventory._inventory[d.hostname].get( + 'status', '').lower() not in ['maintenance', 'offline'] and d.hostname not in self.mgr.offline_hosts)] + self.log.debug('Add %s, remove %s' % (slots_to_add, daemons_to_remove)) + except OrchestratorError as e: + msg = f'Failed to apply {spec.service_name()} spec {spec}: {str(e)}' + self.log.error(msg) + self.mgr.events.for_service(spec, 'ERROR', 'Failed to apply: ' + str(e)) + self.mgr.apply_spec_fails.append((spec.service_name(), str(e))) + warnings = [] + for x in self.mgr.apply_spec_fails: + warnings.append(f'{x[0]}: {x[1]}') + self.mgr.set_health_warning('CEPHADM_APPLY_SPEC_FAIL', + f"Failed to apply {len(self.mgr.apply_spec_fails)} service(s): {','.join(x[0] for x in self.mgr.apply_spec_fails)}", + len(self.mgr.apply_spec_fails), + warnings) + return False + + r = None + + # sanity check + final_count = len(daemons) + len(slots_to_add) - len(daemons_to_remove) + if service_type in ['mon', 'mgr'] and final_count < 1: + self.log.debug('cannot scale mon|mgr below 1)') + return False + + # progress + progress_id = str(uuid.uuid4()) + delta: List[str] = [] + if slots_to_add: + delta += [f'+{len(slots_to_add)}'] + if daemons_to_remove: + delta += [f'-{len(daemons_to_remove)}'] + progress_title = f'Updating {spec.service_name()} deployment ({" ".join(delta)} -> {len(all_slots)})' + progress_total = len(slots_to_add) + len(daemons_to_remove) + progress_done = 0 + + def update_progress() -> None: + self.mgr.remote( + 'progress', 'update', progress_id, + ev_msg=progress_title, + ev_progress=(progress_done / progress_total), + add_to_ceph_s=True, + ) + + if progress_total: + update_progress() + + self.log.debug('Hosts that will receive new daemons: %s' % slots_to_add) + self.log.debug('Daemons that will be removed: %s' % daemons_to_remove) + + hosts_altered: Set[str] = set() + + try: + # assign names + for i in range(len(slots_to_add)): + slot = slots_to_add[i] + slot = slot.assign_name(self.mgr.get_unique_name( + slot.daemon_type, + slot.hostname, + [d for d in daemons if d not in daemons_to_remove], + prefix=spec.service_id, + forcename=slot.name, + rank=slot.rank, + rank_generation=slot.rank_generation, + )) + slots_to_add[i] = slot + if rank_map is not None: + assert slot.rank is not None + assert slot.rank_generation is not None + assert rank_map[slot.rank][slot.rank_generation] is None + rank_map[slot.rank][slot.rank_generation] = slot.name + + if rank_map: + # record the rank_map before we make changes so that if we fail the + # next mgr will clean up. + self.mgr.spec_store.save_rank_map(spec.service_name(), rank_map) + + # remove daemons now, since we are going to fence them anyway + for d in daemons_to_remove: + assert d.hostname is not None + self._remove_daemon(d.name(), d.hostname) + daemons_to_remove = [] + + # fence them + svc.fence_old_ranks(spec, rank_map, len(all_slots)) + + # create daemons + daemon_place_fails = [] + for slot in slots_to_add: + # first remove daemon with conflicting port or name? 
+ if slot.ports or slot.name in [d.name() for d in daemons_to_remove]:
+ for d in daemons_to_remove:
+ if (
+ d.hostname != slot.hostname
+ or not (set(d.ports or []) & set(slot.ports))
+ or (d.ip and slot.ip and d.ip != slot.ip)
+ and d.name() != slot.name
+ ):
+ continue
+ if d.name() != slot.name:
+ self.log.info(
+ f'Removing {d.name()} before deploying to {slot} to avoid a port or name conflict'
+ )
+ # NOTE: we don't check ok-to-stop here to avoid starvation if
+ # there is only 1 gateway.
+ self._remove_daemon(d.name(), d.hostname)
+ daemons_to_remove.remove(d)
+ progress_done += 1
+ hosts_altered.add(d.hostname)
+ break
+
+ # deploy new daemon
+ daemon_id = slot.name
+
+ daemon_spec = svc.make_daemon_spec(
+ slot.hostname, daemon_id, slot.network, spec,
+ daemon_type=slot.daemon_type,
+ ports=slot.ports,
+ ip=slot.ip,
+ rank=slot.rank,
+ rank_generation=slot.rank_generation,
+ )
+ self.log.debug('Placing %s.%s on host %s' % (
+ slot.daemon_type, daemon_id, slot.hostname))
+
+ try:
+ daemon_spec = svc.prepare_create(daemon_spec)
+ with self.mgr.async_timeout_handler(slot.hostname, f'cephadm deploy ({daemon_spec.daemon_type} type daemon)'):
+ self.mgr.wait_async(self._create_daemon(daemon_spec))
+ r = True
+ progress_done += 1
+ update_progress()
+ hosts_altered.add(daemon_spec.host)
+ self.mgr.spec_store.mark_needs_configuration(spec.service_name())
+ except (RuntimeError, OrchestratorError) as e:
+ msg = (f"Failed while placing {slot.daemon_type}.{daemon_id} "
+ f"on {slot.hostname}: {e}")
+ self.mgr.events.for_service(spec, 'ERROR', msg)
+ self.mgr.log.error(msg)
+ daemon_place_fails.append(msg)
+ # only return "no change" if no one else has already succeeded.
+ # later successes will also change to True
+ if r is None:
+ r = False
+ progress_done += 1
+ update_progress()
+ continue
+
+ # add to daemon list so next name(s) will also be unique
+ sd = orchestrator.DaemonDescription(
+ hostname=slot.hostname,
+ daemon_type=slot.daemon_type,
+ daemon_id=daemon_id,
+ service_name=spec.service_name()
+ )
+ daemons.append(sd)
+ self.mgr.cache.append_tmp_daemon(slot.hostname, sd)
+
+ if daemon_place_fails:
+ self.mgr.set_health_warning('CEPHADM_DAEMON_PLACE_FAIL', f'Failed to place {len(daemon_place_fails)} daemon(s)', len(
+ daemon_place_fails), daemon_place_fails)
+
+ if service_type == 'mgr':
+ active_mgr = svc.get_active_daemon(self.mgr.cache.get_daemons_by_type('mgr'))
+ if active_mgr.daemon_id in [d.daemon_id for d in daemons_to_remove]:
+ # We can't just remove the active mgr like any other daemon.
+ # Need to fail over later so it can be removed on next pass.
+ # This can be accomplished by scheduling a restart of the active mgr.
+ self.mgr._schedule_daemon_action(active_mgr.name(), 'restart')
+
+ if service_type == 'rgw':
+ self._update_rgw_endpoints(cast(RGWSpec, spec))
+
+ # remove any?
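The removal pass that follows (beginning with the nested _ok_to_stop helper) gates batch removals behind the service's ok_to_stop check and shrinks the batch until it is safe, preferring to keep healthy daemons and drop error-state ones first. A condensed, hypothetical sketch of that whittling loop, with the callbacks standing in for the real service methods:

    from typing import Callable, List

    def shrink_until_stoppable(batch: List[str],
                               is_error: Callable[[str], bool],
                               ok_to_stop: Callable[[List[str]], bool]) -> List[str]:
        while batch and not ok_to_stop(batch):
            healthy = [d for d in batch if not is_error(d)]
            if not healthy:
                break  # every daemon left is already in error state; remove them all
            batch.remove(healthy[0])  # spare one healthy daemon, then re-check
        return batch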
+ def _ok_to_stop(remove_daemons: List[orchestrator.DaemonDescription]) -> bool: + daemon_ids = [d.daemon_id for d in remove_daemons] + assert None not in daemon_ids + # setting force flag retains previous behavior + r = svc.ok_to_stop(cast(List[str], daemon_ids), force=True) + return not r.retval + + while daemons_to_remove and not _ok_to_stop(daemons_to_remove): + # let's find a subset that is ok-to-stop + non_error_daemon_index = -1 + # prioritize removing daemons in error state + for i, dmon in enumerate(daemons_to_remove): + if dmon.status != DaemonDescriptionStatus.error: + non_error_daemon_index = i + break + if non_error_daemon_index != -1: + daemons_to_remove.pop(non_error_daemon_index) + else: + # all daemons in list are in error state + # we should be able to remove all of them + break + for d in daemons_to_remove: + r = True + assert d.hostname is not None + self._remove_daemon(d.name(), d.hostname) + + progress_done += 1 + update_progress() + hosts_altered.add(d.hostname) + self.mgr.spec_store.mark_needs_configuration(spec.service_name()) + + self.mgr.remote('progress', 'complete', progress_id) + except Exception as e: + self.mgr.remote('progress', 'fail', progress_id, str(e)) + raise + finally: + if self.mgr.spec_store.needs_configuration(spec.service_name()): + svc.config(spec) + self.mgr.spec_store.mark_configured(spec.service_name()) + if self.mgr.use_agent: + # can only send ack to agents if we know for sure port they bound to + hosts_altered = set([h for h in hosts_altered if (h in self.mgr.agent_cache.agent_ports and not self.mgr.cache.is_host_draining(h))]) + self.mgr.agent_helpers._request_agent_acks(hosts_altered, increment=True) + + if r is None: + r = False + return r + + def _check_daemons(self) -> None: + self.log.debug('_check_daemons') + daemons = self.mgr.cache.get_daemons() + daemons_post: Dict[str, List[orchestrator.DaemonDescription]] = defaultdict(list) + for dd in daemons: + # orphan? + spec = self.mgr.spec_store.active_specs.get(dd.service_name(), None) + assert dd.hostname is not None + assert dd.daemon_type is not None + assert dd.daemon_id is not None + + # any action we can try will fail for a daemon on an offline host, + # including removing the daemon + if dd.hostname in self.mgr.offline_hosts: + continue + + if not spec and dd.daemon_type not in ['mon', 'mgr', 'osd']: + # (mon and mgr specs should always exist; osds aren't matched + # to a service spec) + self.log.info('Removing orphan daemon %s...' 
% dd.name()) + self._remove_daemon(dd.name(), dd.hostname) + + # ignore unmanaged services + if spec and spec.unmanaged: + continue + + # ignore daemons for deleted services + if dd.service_name() in self.mgr.spec_store.spec_deleted: + continue + + if dd.daemon_type == 'agent': + try: + self.mgr.agent_helpers._check_agent(dd.hostname) + except Exception as e: + self.log.debug( + f'Agent {dd.name()} could not be checked in _check_daemons: {e}') + continue + + # These daemon types require additional configs after creation + if dd.daemon_type in REQUIRES_POST_ACTIONS: + daemons_post[dd.daemon_type].append(dd) + + if self.mgr.cephadm_services[daemon_type_to_service(dd.daemon_type)].get_active_daemon( + self.mgr.cache.get_daemons_by_service(dd.service_name())).daemon_id == dd.daemon_id: + dd.is_active = True + else: + dd.is_active = False + + deps = self.mgr._calc_daemon_deps(spec, dd.daemon_type, dd.daemon_id) + last_deps, last_config = self.mgr.cache.get_daemon_last_config_deps( + dd.hostname, dd.name()) + if last_deps is None: + last_deps = [] + action = self.mgr.cache.get_scheduled_daemon_action(dd.hostname, dd.name()) + if not last_config: + self.log.info('Reconfiguring %s (unknown last config time)...' % ( + dd.name())) + action = 'reconfig' + elif last_deps != deps: + self.log.debug(f'{dd.name()} deps {last_deps} -> {deps}') + self.log.info(f'Reconfiguring {dd.name()} (dependencies changed)...') + action = 'reconfig' + # we need only redeploy if secure_monitoring_stack value has changed: + if dd.daemon_type in ['prometheus', 'node-exporter', 'alertmanager']: + diff = list(set(last_deps) - set(deps)) + if any('secure_monitoring_stack' in e for e in diff): + action = 'redeploy' + + elif spec is not None and hasattr(spec, 'extra_container_args') and dd.extra_container_args != spec.extra_container_args: + self.log.debug( + f'{dd.name()} container cli args {dd.extra_container_args} -> {spec.extra_container_args}') + self.log.info(f'Redeploying {dd.name()}, (container cli args changed) . . .') + dd.extra_container_args = spec.extra_container_args + action = 'redeploy' + elif spec is not None and hasattr(spec, 'extra_entrypoint_args') and dd.extra_entrypoint_args != spec.extra_entrypoint_args: + self.log.info(f'Redeploying {dd.name()}, (entrypoint args changed) . . .') + self.log.debug( + f'{dd.name()} daemon entrypoint args {dd.extra_entrypoint_args} -> {spec.extra_entrypoint_args}') + dd.extra_entrypoint_args = spec.extra_entrypoint_args + action = 'redeploy' + elif self.mgr.last_monmap and \ + self.mgr.last_monmap > last_config and \ + dd.daemon_type in CEPH_TYPES: + self.log.info('Reconfiguring %s (monmap changed)...' % dd.name()) + action = 'reconfig' + elif self.mgr.extra_ceph_conf_is_newer(last_config) and \ + dd.daemon_type in CEPH_TYPES: + self.log.info('Reconfiguring %s (extra config changed)...' % dd.name()) + action = 'reconfig' + if action: + if self.mgr.cache.get_scheduled_daemon_action(dd.hostname, dd.name()) == 'redeploy' \ + and action == 'reconfig': + action = 'redeploy' + try: + daemon_spec = CephadmDaemonDeploySpec.from_daemon_description(dd) + self.mgr._daemon_action(daemon_spec, action=action) + if self.mgr.cache.rm_scheduled_daemon_action(dd.hostname, dd.name()): + self.mgr.cache.save_host(dd.hostname) + except OrchestratorError as e: + self.log.exception(e) + self.mgr.events.from_orch_error(e) + if dd.daemon_type in daemons_post: + del daemons_post[dd.daemon_type] + # continue... 
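To summarize the reconfig-vs-redeploy ladder applied above (the except handler that follows resumes the original flow), here is a hedged, condensed sketch of how a dependency diff escalates the chosen action; the function name and parameters are illustrative, not the module's API:

    from typing import List, Optional

    def pick_action(last_deps: Optional[List[str]], deps: List[str],
                    monitoring_daemon: bool, scheduled: Optional[str]) -> Optional[str]:
        action = None
        if last_deps is None or last_deps != deps:
            action = 'reconfig'  # unknown last config, or dependencies changed
            removed = set(last_deps or []) - set(deps)
            if monitoring_daemon and any('secure_monitoring_stack' in d for d in removed):
                action = 'redeploy'  # TLS posture changed; a reconfig is not enough
        if scheduled == 'redeploy' and action == 'reconfig':
            action = 'redeploy'  # a scheduled redeploy outranks a computed reconfig
        return action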
+ except Exception as e: + self.log.exception(e) + self.mgr.events.for_daemon_from_exception(dd.name(), e) + if dd.daemon_type in daemons_post: + del daemons_post[dd.daemon_type] + # continue... + + # do daemon post actions + for daemon_type, daemon_descs in daemons_post.items(): + run_post = False + for d in daemon_descs: + if d.name() in self.mgr.requires_post_actions: + self.mgr.requires_post_actions.remove(d.name()) + run_post = True + if run_post: + self.mgr._get_cephadm_service(daemon_type_to_service( + daemon_type)).daemon_check_post(daemon_descs) + + def _purge_deleted_services(self) -> None: + self.log.debug('_purge_deleted_services') + existing_services = self.mgr.spec_store.all_specs.items() + for service_name, spec in list(existing_services): + if service_name not in self.mgr.spec_store.spec_deleted: + continue + if self.mgr.cache.get_daemons_by_service(service_name): + continue + if spec.service_type in ['mon', 'mgr']: + continue + + logger.info(f'Purge service {service_name}') + + self.mgr.cephadm_services[spec.service_type].purge(service_name) + self.mgr.spec_store.finally_rm(service_name) + + def convert_tags_to_repo_digest(self) -> None: + if not self.mgr.use_repo_digest: + return + settings = self.mgr.upgrade.get_distinct_container_image_settings() + digests: Dict[str, ContainerInspectInfo] = {} + for container_image_ref in set(settings.values()): + if not is_repo_digest(container_image_ref): + with self.mgr.async_timeout_handler(cmd=f'cephadm inspect-image (image {container_image_ref})'): + image_info = self.mgr.wait_async( + self._get_container_image_info(container_image_ref)) + if image_info.repo_digests: + # FIXME: we assume the first digest here is the best + assert is_repo_digest(image_info.repo_digests[0]), image_info + digests[container_image_ref] = image_info + + for entity, container_image_ref in settings.items(): + if not is_repo_digest(container_image_ref): + image_info = digests[container_image_ref] + if image_info.repo_digests: + # FIXME: we assume the first digest here is the best + self.mgr.set_container_image(entity, image_info.repo_digests[0]) + + def _calc_client_files(self) -> Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]]: + # host -> path -> (mode, uid, gid, content, digest) + client_files: Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]] = {} + + # ceph.conf + config = self.mgr.get_minimal_ceph_conf().encode('utf-8') + config_digest = ''.join('%02x' % c for c in hashlib.sha256(config).digest()) + cluster_cfg_dir = f'/var/lib/ceph/{self.mgr._cluster_fsid}/config' + + if self.mgr.manage_etc_ceph_ceph_conf: + try: + pspec = PlacementSpec.from_string(self.mgr.manage_etc_ceph_ceph_conf_hosts) + ha = HostAssignment( + spec=ServiceSpec('mon', placement=pspec), + hosts=self.mgr.cache.get_conf_keyring_available_hosts(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_conf_keyring_draining_hosts(), + daemons=[], + networks=self.mgr.cache.networks, + ) + all_slots, _, _ = ha.place() + for host in {s.hostname for s in all_slots}: + if host not in client_files: + client_files[host] = {} + ceph_conf = (0o644, 0, 0, bytes(config), str(config_digest)) + client_files[host]['/etc/ceph/ceph.conf'] = ceph_conf + client_files[host][f'{cluster_cfg_dir}/ceph.conf'] = ceph_conf + except Exception as e: + self.mgr.log.warning( + f'unable to calc conf hosts: {self.mgr.manage_etc_ceph_ceph_conf_hosts}: {e}') + + # client keyrings + for ks in self.mgr.keys.keys.values(): + try: + ret, keyring, err = 
self.mgr.mon_command({ + 'prefix': 'auth get', + 'entity': ks.entity, + }) + if ret: + self.log.warning(f'unable to fetch keyring for {ks.entity}') + continue + digest = ''.join('%02x' % c for c in hashlib.sha256( + keyring.encode('utf-8')).digest()) + ha = HostAssignment( + spec=ServiceSpec('mon', placement=ks.placement), + hosts=self.mgr.cache.get_conf_keyring_available_hosts(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_conf_keyring_draining_hosts(), + daemons=[], + networks=self.mgr.cache.networks, + ) + all_slots, _, _ = ha.place() + for host in {s.hostname for s in all_slots}: + if host not in client_files: + client_files[host] = {} + ceph_conf = (0o644, 0, 0, bytes(config), str(config_digest)) + client_files[host]['/etc/ceph/ceph.conf'] = ceph_conf + client_files[host][f'{cluster_cfg_dir}/ceph.conf'] = ceph_conf + ceph_admin_key = (ks.mode, ks.uid, ks.gid, keyring.encode('utf-8'), digest) + client_files[host][ks.path] = ceph_admin_key + client_files[host][f'{cluster_cfg_dir}/{os.path.basename(ks.path)}'] = ceph_admin_key + except Exception as e: + self.log.warning( + f'unable to calc client keyring {ks.entity} placement {ks.placement}: {e}') + return client_files + + def _write_all_client_files(self) -> None: + if self.mgr.manage_etc_ceph_ceph_conf or self.mgr.keys.keys: + client_files = self._calc_client_files() + else: + client_files = {} + + @forall_hosts + def _write_files(host: str) -> None: + self._write_client_files(client_files, host) + + _write_files(self.mgr.cache.get_hosts()) + + def _write_client_files(self, + client_files: Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]], + host: str) -> None: + updated_files = False + if self.mgr.cache.is_host_unreachable(host): + return + old_files = self.mgr.cache.get_host_client_files(host).copy() + for path, m in client_files.get(host, {}).items(): + mode, uid, gid, content, digest = m + if path in old_files: + match = old_files[path] == (digest, mode, uid, gid) + del old_files[path] + if match: + continue + self.log.info(f'Updating {host}:{path}') + self.mgr.ssh.write_remote_file(host, path, content, mode, uid, gid) + self.mgr.cache.update_client_file(host, path, digest, mode, uid, gid) + updated_files = True + for path in old_files.keys(): + if path == '/etc/ceph/ceph.conf': + continue + self.log.info(f'Removing {host}:{path}') + cmd = ['rm', '-f', path] + self.mgr.ssh.check_execute_command(host, cmd) + updated_files = True + self.mgr.cache.removed_client_file(host, path) + if updated_files: + self.mgr.cache.save_host(host) + + async def _create_daemon(self, + daemon_spec: CephadmDaemonDeploySpec, + reconfig: bool = False, + osd_uuid_map: Optional[Dict[str, Any]] = None, + ) -> str: + + daemon_params: Dict[str, Any] = {} + with set_exception_subject('service', orchestrator.DaemonDescription( + daemon_type=daemon_spec.daemon_type, + daemon_id=daemon_spec.daemon_id, + hostname=daemon_spec.host, + ).service_id(), overwrite=True): + + try: + image = '' + start_time = datetime_now() + ports: List[int] = daemon_spec.ports if daemon_spec.ports else [] + port_ips: Dict[str, str] = daemon_spec.port_ips if daemon_spec.port_ips else {} + + if daemon_spec.daemon_type == 'container': + spec = cast(CustomContainerSpec, + self.mgr.spec_store[daemon_spec.service_name].spec) + image = spec.image + if spec.ports: + ports.extend(spec.ports) + + # TCP port to open in the host firewall + if len(ports) > 0: + daemon_params['tcp_ports'] = list(ports) + + if port_ips: + daemon_params['port_ips'] 
= port_ips + + # osd deployments needs an --osd-uuid arg + if daemon_spec.daemon_type == 'osd': + if not osd_uuid_map: + osd_uuid_map = self.mgr.get_osd_uuid_map() + osd_uuid = osd_uuid_map.get(daemon_spec.daemon_id) + if not osd_uuid: + raise OrchestratorError('osd.%s not in osdmap' % daemon_spec.daemon_id) + daemon_params['osd_fsid'] = osd_uuid + + if reconfig: + daemon_params['reconfig'] = True + if self.mgr.allow_ptrace: + daemon_params['allow_ptrace'] = True + + daemon_spec, extra_container_args, extra_entrypoint_args = self._setup_extra_deployment_args(daemon_spec, daemon_params) + + if daemon_spec.service_name in self.mgr.spec_store: + configs = self.mgr.spec_store[daemon_spec.service_name].spec.custom_configs + if configs is not None: + daemon_spec.final_config.update( + {'custom_config_files': [c.to_json() for c in configs]}) + + if self.mgr.cache.host_needs_registry_login(daemon_spec.host) and self.mgr.registry_url: + await self._registry_login(daemon_spec.host, json.loads(str(self.mgr.get_store('registry_credentials')))) + + self.log.info('%s daemon %s on %s' % ( + 'Reconfiguring' if reconfig else 'Deploying', + daemon_spec.name(), daemon_spec.host)) + + out, err, code = await self._run_cephadm( + daemon_spec.host, + daemon_spec.name(), + ['_orch', 'deploy'], + [], + stdin=exchange.Deploy( + fsid=self.mgr._cluster_fsid, + name=daemon_spec.name(), + image=image, + params=daemon_params, + meta=exchange.DeployMeta( + service_name=daemon_spec.service_name, + ports=daemon_spec.ports, + ip=daemon_spec.ip, + deployed_by=self.mgr.get_active_mgr_digests(), + rank=daemon_spec.rank, + rank_generation=daemon_spec.rank_generation, + extra_container_args=ArgumentSpec.map_json( + extra_container_args, + ), + extra_entrypoint_args=ArgumentSpec.map_json( + extra_entrypoint_args, + ), + ), + config_blobs=daemon_spec.final_config, + ).dump_json_str(), + ) + + if daemon_spec.daemon_type == 'agent': + self.mgr.agent_cache.agent_timestamp[daemon_spec.host] = datetime_now() + self.mgr.agent_cache.agent_counter[daemon_spec.host] = 1 + + # refresh daemon state? (ceph daemon reconfig does not need it) + if not reconfig or daemon_spec.daemon_type not in CEPH_TYPES: + if not code and daemon_spec.host in self.mgr.cache.daemons: + # prime cached service state with what we (should have) + # just created + sd = daemon_spec.to_daemon_description( + DaemonDescriptionStatus.starting, 'starting') + self.mgr.cache.add_daemon(daemon_spec.host, sd) + if daemon_spec.daemon_type in REQUIRES_POST_ACTIONS: + self.mgr.requires_post_actions.add(daemon_spec.name()) + self.mgr.cache.invalidate_host_daemons(daemon_spec.host) + + if daemon_spec.daemon_type != 'agent': + self.mgr.cache.update_daemon_config_deps( + daemon_spec.host, daemon_spec.name(), daemon_spec.deps, start_time) + self.mgr.cache.save_host(daemon_spec.host) + else: + self.mgr.agent_cache.update_agent_config_deps( + daemon_spec.host, daemon_spec.deps, start_time) + self.mgr.agent_cache.save_agent(daemon_spec.host) + msg = "{} {} on host '{}'".format( + 'Reconfigured' if reconfig else 'Deployed', daemon_spec.name(), daemon_spec.host) + if not code: + self.mgr.events.for_daemon(daemon_spec.name(), OrchestratorEvent.INFO, msg) + else: + what = 'reconfigure' if reconfig else 'deploy' + self.mgr.events.for_daemon( + daemon_spec.name(), OrchestratorEvent.ERROR, f'Failed to {what}: {err}') + return msg + except OrchestratorError: + redeploy = daemon_spec.name() in self.mgr.cache.get_daemon_names() + if not reconfig and not redeploy: + # we have to clean up the daemon. 
E.g. keyrings.
+ service_type = daemon_type_to_service(daemon_spec.daemon_type)
+ dd = daemon_spec.to_daemon_description(DaemonDescriptionStatus.error, 'failed')
+ self.mgr.cephadm_services[service_type].post_remove(dd, is_failed_deploy=True)
+ raise
+
+ def _setup_extra_deployment_args(
+ self,
+ daemon_spec: CephadmDaemonDeploySpec,
+ params: Dict[str, Any],
+ ) -> Tuple[CephadmDaemonDeploySpec, Optional[ArgumentList], Optional[ArgumentList]]:
+ # this function handles any potential user-specified
+ # (in the service spec) extra runtime or entrypoint args for a daemon
+ # we are going to deploy. Effectively just adds a set of extra args to
+ # pass to the cephadm binary to indicate the daemon being deployed
+ # needs extra runtime/entrypoint args. Returns the modified daemon spec
+ # as well as what args were added (as those are included in unit.meta file)
+ def _to_args(lst: ArgumentList) -> List[str]:
+ out: List[str] = []
+ for argspec in lst:
+ out.extend(argspec.to_args())
+ return out
+
+ try:
+ eca = daemon_spec.extra_container_args
+ if eca:
+ params['extra_container_args'] = _to_args(eca)
+ except AttributeError:
+ eca = None
+ try:
+ eea = daemon_spec.extra_entrypoint_args
+ if eea:
+ params['extra_entrypoint_args'] = _to_args(eea)
+ except AttributeError:
+ eea = None
+ return daemon_spec, eca, eea
+
+ def _remove_daemon(self, name: str, host: str, no_post_remove: bool = False) -> str:
+ """
+ Remove a daemon
+ """
+ (daemon_type, daemon_id) = name.split('.', 1)
+ daemon = orchestrator.DaemonDescription(
+ daemon_type=daemon_type,
+ daemon_id=daemon_id,
+ hostname=host)
+
+ with set_exception_subject('service', daemon.service_id(), overwrite=True):
+
+ self.mgr.cephadm_services[daemon_type_to_service(daemon_type)].pre_remove(daemon)
+ # NOTE: we are passing the 'force' flag here, which means
+ # we can delete a mon instance's data.
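The argument assembly just below passes --force and, when the daemon had published ports, --tcp-ports so the host firewall openings are cleaned up as well. A standalone sketch of that assembly:

    from typing import List, Optional

    def rm_daemon_args(name: str, ports: Optional[List[int]]) -> List[str]:
        if ports:
            # also hand the ports to cephadm so it can close them on the host
            return ['--name', name, '--force', '--tcp-ports', ' '.join(map(str, ports))]
        return ['--name', name, '--force']

    # rm_daemon_args('rgw.foo.host1.abcdef', [80, 443])
    # -> ['--name', 'rgw.foo.host1.abcdef', '--force', '--tcp-ports', '80 443']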
+ dd = self.mgr.cache.get_daemon(daemon.daemon_name) + if dd.ports: + args = ['--name', name, '--force', '--tcp-ports', ' '.join(map(str, dd.ports))] + else: + args = ['--name', name, '--force'] + + self.log.info('Removing daemon %s from %s -- ports %s' % (name, host, dd.ports)) + with self.mgr.async_timeout_handler(host, f'cephadm rm-daemon (daemon {name})'): + out, err, code = self.mgr.wait_async(self._run_cephadm( + host, name, 'rm-daemon', args)) + if not code: + # remove item from cache + self.mgr.cache.rm_daemon(host, name) + self.mgr.cache.invalidate_host_daemons(host) + + if not no_post_remove: + if daemon_type not in ['iscsi']: + self.mgr.cephadm_services[daemon_type_to_service( + daemon_type)].post_remove(daemon, is_failed_deploy=False) + else: + self.mgr.scheduled_async_actions.append(lambda: self.mgr.cephadm_services[daemon_type_to_service( + daemon_type)].post_remove(daemon, is_failed_deploy=False)) + self.mgr._kick_serve_loop() + + return "Removed {} from host '{}'".format(name, host) + + async def _run_cephadm_json(self, + host: str, + entity: Union[CephadmNoImage, str], + command: str, + args: List[str], + no_fsid: Optional[bool] = False, + error_ok: Optional[bool] = False, + image: Optional[str] = "", + log_output: Optional[bool] = True, + ) -> Any: + try: + out, err, code = await self._run_cephadm( + host, entity, command, args, no_fsid=no_fsid, error_ok=error_ok, + image=image, log_output=log_output) + if code: + raise OrchestratorError(f'host {host} `cephadm {command}` returned {code}: {err}') + except Exception as e: + raise OrchestratorError(f'host {host} `cephadm {command}` failed: {e}') + try: + return json.loads(''.join(out)) + except (ValueError, KeyError): + msg = f'host {host} `cephadm {command}` failed: Cannot decode JSON' + self.log.exception(f'{msg}: {"".join(out)}') + raise OrchestratorError(msg) + + async def _run_cephadm(self, + host: str, + entity: Union[CephadmNoImage, str], + command: Union[str, List[str]], + args: List[str], + addr: Optional[str] = "", + stdin: Optional[str] = "", + no_fsid: Optional[bool] = False, + error_ok: Optional[bool] = False, + image: Optional[str] = "", + env_vars: Optional[List[str]] = None, + log_output: Optional[bool] = True, + timeout: Optional[int] = None, # timeout in seconds + ) -> Tuple[List[str], List[str], int]: + """ + Run cephadm on the remote host with the given command + args + + Important: You probably don't want to run _run_cephadm from CLI handlers + + :env_vars: in format -> [KEY=VALUE, ..] + """ + + await self.mgr.ssh._remote_connection(host, addr) + + self.log.debug(f"_run_cephadm : command = {command}") + self.log.debug(f"_run_cephadm : args = {args}") + + bypass_image = ('agent') + + assert image or entity + # Skip the image check for daemons deployed that are not ceph containers + if not str(entity).startswith(bypass_image): + if not image and entity is not cephadmNoImage: + image = self.mgr._get_container_image(entity) + + final_args = [] + + # global args + if env_vars: + for env_var_pair in env_vars: + final_args.extend(['--env', env_var_pair]) + + if image: + final_args.extend(['--image', image]) + + if not self.mgr.container_init: + final_args += ['--no-container-init'] + + if not self.mgr.cgroups_split: + final_args += ['--no-cgroups-split'] + + if not timeout: + # default global timeout if no timeout was passed + timeout = self.mgr.default_cephadm_command_timeout + # put a lower bound of 60 seconds in case users + # accidentally set it to something unreasonable. 
+ # For example if they though it was in minutes + # rather than seconds + if timeout < 60: + self.log.info(f'Found default timeout set to {timeout}. Instead trying minimum of 60.') + timeout = 60 + # subtract a small amount to give this timeout + # in the binary a chance to actually happen over + # the asyncio based timeout in the mgr module + timeout -= 5 + final_args += ['--timeout', str(timeout)] + + # subcommand + if isinstance(command, list): + final_args.extend([str(v) for v in command]) + else: + final_args.append(command) + + # subcommand args + if not no_fsid: + final_args += ['--fsid', self.mgr._cluster_fsid] + + final_args += args + + # exec + self.log.debug('args: %s' % (' '.join(final_args))) + if self.mgr.mode == 'root': + # agent has cephadm binary as an extra file which is + # therefore passed over stdin. Even for debug logs it's too much + if stdin and 'agent' not in str(entity): + self.log.debug('stdin: %s' % stdin) + + cmd = ['which', 'python3'] + python = await self.mgr.ssh._check_execute_command(host, cmd, addr=addr) + cmd = [python, self.mgr.cephadm_binary_path] + final_args + + try: + out, err, code = await self.mgr.ssh._execute_command( + host, cmd, stdin=stdin, addr=addr) + if code == 2: + ls_cmd = ['ls', self.mgr.cephadm_binary_path] + out_ls, err_ls, code_ls = await self.mgr.ssh._execute_command(host, ls_cmd, addr=addr, + log_command=log_output) + if code_ls == 2: + await self._deploy_cephadm_binary(host, addr) + out, err, code = await self.mgr.ssh._execute_command( + host, cmd, stdin=stdin, addr=addr) + # if there is an agent on this host, make sure it is using the most recent + # version of cephadm binary + if host in self.mgr.inventory: + for agent in self.mgr.cache.get_daemons_by_type('agent', host): + self.mgr._schedule_daemon_action(agent.name(), 'redeploy') + + except Exception as e: + await self.mgr.ssh._reset_con(host) + if error_ok: + return [], [str(e)], 1 + raise + + elif self.mgr.mode == 'cephadm-package': + try: + cmd = ['/usr/bin/cephadm'] + final_args + out, err, code = await self.mgr.ssh._execute_command( + host, cmd, stdin=stdin, addr=addr) + except Exception as e: + await self.mgr.ssh._reset_con(host) + if error_ok: + return [], [str(e)], 1 + raise + else: + assert False, 'unsupported mode' + + if log_output: + self.log.debug(f'code: {code}') + if out: + self.log.debug(f'out: {out}') + if err: + self.log.debug(f'err: {err}') + if code and not error_ok: + raise OrchestratorError( + f'cephadm exited with an error code: {code}, stderr: {err}') + return [out], [err], code + + async def _get_container_image_info(self, image_name: str) -> ContainerInspectInfo: + # pick a random host... 
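Before the host-picking code that follows, a hedged sketch of the inspect-then-pull fallback this image-info helper performs: try a cheap local image inspect first, and only pull if nothing usable comes back. Here run_json stands in for _run_cephadm_json (an assumption for brevity):

    from typing import Any, Awaitable, Callable, List, Optional

    async def image_info(run_json: Callable[..., Awaitable[Optional[dict]]],
                         image: str, insecure: bool) -> Optional[dict]:
        j = None
        try:
            j = await run_json('inspect-image', [], image=image)  # cheap local check
        except Exception:
            pass  # fall through to an explicit pull
        if not j:
            pullargs: List[str] = ['--insecure'] if insecure else []
            j = await run_json('pull', pullargs, image=image)
        return j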
+    async def _get_container_image_info(self, image_name: str) -> ContainerInspectInfo:
+        # pick an arbitrary host (we simply take the first one in the inventory)...
+        host = None
+        for host_name in self.mgr.inventory.keys():
+            host = host_name
+            break
+        if not host:
+            raise OrchestratorError('no hosts defined')
+        if self.mgr.cache.host_needs_registry_login(host) and self.mgr.registry_url:
+            await self._registry_login(host, json.loads(str(self.mgr.get_store('registry_credentials'))))
+
+        j = None
+        try:
+            j = await self._run_cephadm_json(host, '', 'inspect-image', [],
+                                             image=image_name, no_fsid=True,
+                                             error_ok=True)
+        except OrchestratorError:
+            pass
+
+        if not j:
+            pullargs: List[str] = []
+            if self.mgr.registry_insecure:
+                pullargs.append("--insecure")
+
+            j = await self._run_cephadm_json(host, '', 'pull', pullargs,
+                                             image=image_name, no_fsid=True)
+        r = ContainerInspectInfo(
+            j['image_id'],
+            j.get('ceph_version'),
+            j.get('repo_digests')
+        )
+        self.log.debug(f'image {image_name} -> {r}')
+        return r
+
+    # log a single host into a custom registry
+    async def _registry_login(self, host: str, registry_json: Dict[str, str]) -> Optional[str]:
+        self.log.debug(
+            f"Attempting to log host {host} into custom registry @ {registry_json['url']}")
+        # want to pass info over stdin rather than through normal list of args
+        out, err, code = await self._run_cephadm(
+            host, 'mon', 'registry-login',
+            ['--registry-json', '-'], stdin=json.dumps(registry_json), error_ok=True)
+        if code:
+            return f"Host {host} failed to log in to {registry_json['url']} as {registry_json['username']} with given password"
+        return None
+
+    async def _deploy_cephadm_binary(self, host: str, addr: Optional[str] = None) -> None:
+        # Use tee (from coreutils) to create a copy of cephadm on the target machine
+        self.log.info(f"Deploying cephadm binary to {host}")
+        await self.mgr.ssh._write_remote_file(host, self.mgr.cephadm_binary_path,
+                                              self.mgr._cephadm, addr=addr)
diff --git a/src/pybind/mgr/cephadm/service_discovery.py b/src/pybind/mgr/cephadm/service_discovery.py
new file mode 100644
index 000000000..ddc0574e2
--- /dev/null
+++ b/src/pybind/mgr/cephadm/service_discovery.py
@@ -0,0 +1,239 @@
+try:
+    import cherrypy
+    from cherrypy._cpserver import Server
+except ImportError:
+    # to avoid sphinx build crash
+    class Server:  # type: ignore
+        pass
+
+import logging
+import socket
+
+import orchestrator  # noqa
+from mgr_module import ServiceInfoT
+from mgr_util import build_url
+from typing import Dict, List, TYPE_CHECKING, cast, Collection, Callable, NamedTuple, Optional
+from cephadm.services.monitoring import AlertmanagerService, NodeExporterService, PrometheusService
+import secrets
+
+from cephadm.services.ingress import IngressSpec
+from cephadm.ssl_cert_utils import SSLCerts
+from cephadm.services.cephadmservice import CephExporterService
+
+if TYPE_CHECKING:
+    from cephadm.module import CephadmOrchestrator
+
+
+def cherrypy_filter(record: logging.LogRecord) -> int:
+    blocked = [
+        'TLSV1_ALERT_DECRYPT_ERROR'
+    ]
+    msg = record.getMessage()
+    return not any([m for m in blocked if m in msg])
+
+
+logging.getLogger('cherrypy.error').addFilter(cherrypy_filter)
+cherrypy.log.access_log.propagate = False
+
+
+class Route(NamedTuple):
+    name: str
+    route: str
+    controller: Callable
+
+
+class ServiceDiscovery:
+
+    KV_STORE_SD_ROOT_CERT = 'service_discovery/root/cert'
+    KV_STORE_SD_ROOT_KEY = 'service_discovery/root/key'
+
+    def __init__(self, mgr: "CephadmOrchestrator") -> None:
+        self.mgr = mgr
+        self.ssl_certs = SSLCerts()
+        self.username: Optional[str] = None
+        self.password: Optional[str] = None
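validate_password below is the `checkpassword` hook that cherrypy's `auth_basic` tool calls with `(realm, username, password)`, expecting a boolean back. A hardened variant would compare credentials in constant time; a sketch of that idea (this is not what the module does, which uses plain `==`):

```python
import hmac


def checkpassword(realm: str, username: str, password: str,
                  expected_user: str = 'admin', expected_pass: str = 'secret') -> bool:
    # hmac.compare_digest avoids leaking information through comparison timing
    return (hmac.compare_digest(username.encode(), expected_user.encode())
            and hmac.compare_digest(password.encode(), expected_pass.encode()))
```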
+    def validate_password(self, realm: str, username: str,
+                          password: str) -> bool:
+        return (password == self.password and username == self.username)
+
+    def configure_routes(self, server: Server, enable_auth: bool) -> None:
+        ROUTES = [
+            Route('index', '/', server.index),
+            Route('sd-config', '/prometheus/sd-config', server.get_sd_config),
+            Route('rules', '/prometheus/rules', server.get_prometheus_rules),
+        ]
+        d = cherrypy.dispatch.RoutesDispatcher()
+        for route in ROUTES:
+            d.connect(**route._asdict())
+        if enable_auth:
+            conf = {
+                '/': {
+                    'request.dispatch': d,
+                    'tools.auth_basic.on': True,
+                    'tools.auth_basic.realm': 'localhost',
+                    'tools.auth_basic.checkpassword': self.validate_password
+                }
+            }
+        else:
+            conf = {'/': {'request.dispatch': d}}
+        cherrypy.tree.mount(None, '/sd', config=conf)
+
+    def enable_auth(self) -> None:
+        self.username = self.mgr.get_store('service_discovery/root/username')
+        self.password = self.mgr.get_store('service_discovery/root/password')
+        if not self.password or not self.username:
+            self.username = 'admin'  # TODO(redo): what should be the default username
+            self.password = secrets.token_urlsafe(20)
+            self.mgr.set_store('service_discovery/root/password', self.password)
+            self.mgr.set_store('service_discovery/root/username', self.username)
+
+    def configure_tls(self, server: Server) -> None:
+        old_cert = self.mgr.get_store(self.KV_STORE_SD_ROOT_CERT)
+        old_key = self.mgr.get_store(self.KV_STORE_SD_ROOT_KEY)
+        if old_key and old_cert:
+            self.ssl_certs.load_root_credentials(old_cert, old_key)
+        else:
+            self.ssl_certs.generate_root_cert(self.mgr.get_mgr_ip())
+            self.mgr.set_store(self.KV_STORE_SD_ROOT_CERT, self.ssl_certs.get_root_cert())
+            self.mgr.set_store(self.KV_STORE_SD_ROOT_KEY, self.ssl_certs.get_root_key())
+        addr = self.mgr.get_mgr_ip()
+        host_fqdn = socket.getfqdn(addr)
+        server.ssl_certificate, server.ssl_private_key = self.ssl_certs.generate_cert_files(
+            host_fqdn, addr)
+
+    def configure(self, port: int, addr: str, enable_security: bool) -> None:
+        # we create a new server to enforce TLS/SSL config refresh
+        self.root_server = Root(self.mgr, port, addr)
+        self.root_server.ssl_certificate = None
+        self.root_server.ssl_private_key = None
+        if enable_security:
+            self.enable_auth()
+            self.configure_tls(self.root_server)
+        self.configure_routes(self.root_server, enable_security)
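enable_auth above follows a generate-once pattern: credentials live in the mgr key/value store and a random password is minted only on first use, so it survives mgr restarts. The same idiom reduced to a sketch with a hypothetical dict-backed store:

```python
import secrets


def get_or_create_password(store: dict, key: str = 'service_discovery/root/password') -> str:
    # mint a urlsafe token once and persist it; later calls return the stored value
    password = store.get(key)
    if not password:
        password = secrets.token_urlsafe(20)
        store[key] = password
    return password


store: dict = {}
first = get_or_create_password(store)
assert get_or_create_password(store) == first  # stable across restarts
```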
+
+
+class Root(Server):
+
+    # collapse everything to '/'
+    def _cp_dispatch(self, vpath: str) -> 'Root':
+        cherrypy.request.path = ''
+        return self
+
+    def stop(self) -> None:
+        # we must call unsubscribe before stopping the server,
+        # otherwise the port is not released and we will get
+        # an exception when trying to restart it
+        self.unsubscribe()
+        super().stop()
+
+    def __init__(self, mgr: "CephadmOrchestrator", port: int = 0, host: str = ''):
+        self.mgr = mgr
+        super().__init__()
+        self.socket_port = port
+        self.socket_host = host
+        self.subscribe()
+
+    @cherrypy.expose
+    def index(self) -> str:
+        return '''<html>
+<head><title>Cephadm HTTP Endpoint</title></head>
+<body>
+<h2>Cephadm Service Discovery Endpoints</h2>
+<p><a href='prometheus/sd-config?service=mgr-prometheus'>mgr/Prometheus http sd-config</a></p>
+<p><a href='prometheus/sd-config?service=alertmanager'>Alertmanager http sd-config</a></p>
+<p><a href='prometheus/sd-config?service=node-exporter'>Node exporter http sd-config</a></p>
+<p><a href='prometheus/sd-config?service=haproxy'>HAProxy http sd-config</a></p>
+<p><a href='prometheus/sd-config?service=ceph-exporter'>Ceph exporter http sd-config</a></p>
+<p><a href='prometheus/rules'>Prometheus rules</a></p>
+</body>
+</html>
+'''
+
+    @cherrypy.expose
+    @cherrypy.tools.json_out()
+    def get_sd_config(self, service: str) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible sd config for the specified service."""
+        if service == 'mgr-prometheus':
+            return self.prometheus_sd_config()
+        elif service == 'alertmanager':
+            return self.alertmgr_sd_config()
+        elif service == 'node-exporter':
+            return self.node_exporter_sd_config()
+        elif service == 'haproxy':
+            return self.haproxy_sd_config()
+        elif service == 'ceph-exporter':
+            return self.ceph_exporter_sd_config()
+        else:
+            return []
+
+    def prometheus_sd_config(self) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible config for the mgr prometheus service."""
+        servers = self.mgr.list_servers()
+        targets = []
+        for server in servers:
+            hostname = server.get('hostname', '')
+            for service in cast(List[ServiceInfoT], server.get('services', [])):
+                if service['type'] != 'mgr' or service['id'] != self.mgr.get_mgr_id():
+                    continue
+                port = self.mgr.get_module_option_ex(
+                    'prometheus', 'server_port', PrometheusService.DEFAULT_MGR_PROMETHEUS_PORT)
+                targets.append(f'{hostname}:{port}')
+        return [{"targets": targets, "labels": {}}]
+
+    def alertmgr_sd_config(self) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible config for the mgr alertmanager service."""
+        srv_entries = []
+        for dd in self.mgr.cache.get_daemons_by_service('alertmanager'):
+            assert dd.hostname is not None
+            addr = dd.ip if dd.ip else self.mgr.inventory.get_addr(dd.hostname)
+            port = dd.ports[0] if dd.ports else AlertmanagerService.DEFAULT_SERVICE_PORT
+            srv_entries.append('{}'.format(build_url(host=addr, port=port).lstrip('/')))
+        return [{"targets": srv_entries, "labels": {}}]
+
+    def node_exporter_sd_config(self) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible config for the node-exporter service."""
+        srv_entries = []
+        for dd in self.mgr.cache.get_daemons_by_service('node-exporter'):
+            assert dd.hostname is not None
+            addr = dd.ip if dd.ip else self.mgr.inventory.get_addr(dd.hostname)
+            port = dd.ports[0] if dd.ports else NodeExporterService.DEFAULT_SERVICE_PORT
+            srv_entries.append({
+                'targets': [build_url(host=addr, port=port).lstrip('/')],
+                'labels': {'instance': dd.hostname}
+            })
+        return srv_entries
+
+    def haproxy_sd_config(self) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible config for the haproxy service."""
+        srv_entries = []
+        for dd in self.mgr.cache.get_daemons_by_type('ingress'):
+            if dd.service_name() in self.mgr.spec_store:
+                spec = cast(IngressSpec, self.mgr.spec_store[dd.service_name()].spec)
+                assert dd.hostname is not None
+                if dd.daemon_type == 'haproxy':
+                    addr = self.mgr.inventory.get_addr(dd.hostname)
+                    srv_entries.append({
+                        'targets': [f"{build_url(host=addr, port=spec.monitor_port).lstrip('/')}"],
+                        'labels': {'instance': dd.service_name()}
+                    })
+        return srv_entries
+
+    def ceph_exporter_sd_config(self) -> List[Dict[str, Collection[str]]]:
+        """Return a Prometheus-compatible config for the ceph-exporter service."""
+        srv_entries = []
+        for dd in self.mgr.cache.get_daemons_by_service('ceph-exporter'):
+            assert dd.hostname is not None
+            addr = dd.ip if dd.ip else self.mgr.inventory.get_addr(dd.hostname)
+            port = dd.ports[0] if dd.ports else CephExporterService.DEFAULT_SERVICE_PORT
+            srv_entries.append({
+                'targets': [build_url(host=addr, port=port).lstrip('/')],
+                'labels': {'instance': dd.hostname}
+            })
+        return srv_entries
+
+    @cherrypy.expose(alias='prometheus/rules')
+    def get_prometheus_rules(self) -> str:
+        """Return the currently configured prometheus rules as YAML."""
+        cherrypy.response.headers['Content-Type'] = 'text/plain'
+        with open(self.mgr.prometheus_alerts_path, 'r', encoding='utf-8') as f:
+            return f.read()
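All of the sd-config payloads above share the Prometheus http_sd shape: a JSON list of `{"targets": [...], "labels": {...}}` objects. A hedged sketch of a client fetching one of them; the host name, the service-discovery port (8765), and the use of plain HTTP without auth are assumptions for the example:

```python
import json
import urllib.request


def fetch_sd_targets(mgr_host: str = 'mgr.example.com', port: int = 8765,
                     service: str = 'node-exporter') -> list:
    # endpoint layout follows the routes mounted under /sd above
    url = f'http://{mgr_host}:{port}/sd/prometheus/sd-config?service={service}'
    with urllib.request.urlopen(url) as resp:
        entries = json.load(resp)
    # each entry carries a list of host:port targets plus optional labels
    return [t for entry in entries for t in entry.get('targets', [])]
```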
diff --git a/src/pybind/mgr/cephadm/services/__init__.py b/src/pybind/mgr/cephadm/services/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/pybind/mgr/cephadm/services/cephadmservice.py b/src/pybind/mgr/cephadm/services/cephadmservice.py
new file mode 100644
index 000000000..7d7a04dad
--- /dev/null
+++ b/src/pybind/mgr/cephadm/services/cephadmservice.py
@@ -0,0 +1,1254 @@
+import errno
+import json
+import logging
+import re
+import socket
+import time
+from abc import ABCMeta, abstractmethod
+from typing import TYPE_CHECKING, List, Callable, TypeVar, \
+    Optional, Dict, Any, Tuple, NewType, cast
+
+from mgr_module import HandleCommandResult, MonCommandFailed
+
+from ceph.deployment.service_spec import (
+    ArgumentList,
+    CephExporterSpec,
+    GeneralArgList,
+    MONSpec,
+    RGWSpec,
+    ServiceSpec,
+)
+from ceph.deployment.utils import is_ipv6, unwrap_ipv6
+from mgr_util import build_url, merge_dicts
+from orchestrator import OrchestratorError, DaemonDescription, DaemonDescriptionStatus
+from orchestrator._interface import daemon_type_to_service
+from cephadm import utils
+
+if TYPE_CHECKING:
+    from cephadm.module import CephadmOrchestrator
+
+logger = logging.getLogger(__name__)
+
+ServiceSpecs = TypeVar('ServiceSpecs', bound=ServiceSpec)
+AuthEntity = NewType('AuthEntity', str)
+
+
+def get_auth_entity(daemon_type: str, daemon_id: str, host: str = "") -> AuthEntity:
+    """
+    Map the daemon id to a cephx keyring entity name
+    """
+    # despite this mapping entity names to daemons, self.TYPE within
+    # the CephService class refers to service types, not daemon types
+    if daemon_type in ['rgw', 'rbd-mirror', 'cephfs-mirror', 'nfs', "iscsi", 'nvmeof', 'ingress', 'ceph-exporter']:
+        return AuthEntity(f'client.{daemon_type}.{daemon_id}')
+    elif daemon_type in ['crash', 'agent']:
+        if host == "":
+            raise OrchestratorError(
+                f'Host not provided to generate <{daemon_type}> auth entity name')
+        return AuthEntity(f'client.{daemon_type}.{host}')
+    elif daemon_type == 'mon':
+        return AuthEntity('mon.')
+    elif daemon_type in ['mgr', 'osd', 'mds']:
+        return AuthEntity(f'{daemon_type}.{daemon_id}')
+    else:
+        raise OrchestratorError(f"unknown daemon type {daemon_type}")
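The entity-name mapping is easiest to see with concrete values; a few examples of what get_auth_entity returns, following the branches above:

```python
assert get_auth_entity('rgw', 'myrealm.myzone.a') == 'client.rgw.myrealm.myzone.a'
assert get_auth_entity('crash', 'host1', host='host1') == 'client.crash.host1'
assert get_auth_entity('mon', 'a') == 'mon.'   # all mons share the single 'mon.' entity
assert get_auth_entity('osd', '3') == 'osd.3'
```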
+
+
+class CephadmDaemonDeploySpec:
+    # typing.NamedTuple + Generic is broken in py36
+    def __init__(self, host: str, daemon_id: str,
+                 service_name: str,
+                 network: Optional[str] = None,
+                 keyring: Optional[str] = None,
+                 extra_args: Optional[List[str]] = None,
+                 ceph_conf: str = '',
+                 extra_files: Optional[Dict[str, Any]] = None,
+                 daemon_type: Optional[str] = None,
+                 ip: Optional[str] = None,
+                 ports: Optional[List[int]] = None,
+                 port_ips: Optional[Dict[str, str]] = None,
+                 rank: Optional[int] = None,
+                 rank_generation: Optional[int] = None,
+                 extra_container_args: Optional[ArgumentList] = None,
+                 extra_entrypoint_args: Optional[ArgumentList] = None,
+                 ):
+        """
+        A data structure to encapsulate `cephadm deploy` ...
+        """
+        self.host: str = host
+        self.daemon_id = daemon_id
+        self.service_name = service_name
+        daemon_type = daemon_type or (service_name.split('.')[0])
+        assert daemon_type is not None
+        self.daemon_type: str = daemon_type
+
+        # mons
+        self.network = network
+
+        # for run_cephadm.
+        self.keyring: Optional[str] = keyring
+
+        # FIXME: finish removing this
+        # For run_cephadm. Would be great to have more expressive names.
+        # self.extra_args: List[str] = extra_args or []
+        assert not extra_args
+
+        self.ceph_conf = ceph_conf
+        self.extra_files = extra_files or {}
+
+        # TCP ports used by the daemon
+        self.ports: List[int] = ports or []
+        # mapping of ports to IP addresses for ports
+        # we know we will only bind to on a specific IP.
+        # Useful for allowing multiple daemons to bind
+        # to the same port on different IPs on the same node
+        self.port_ips: Dict[str, str] = port_ips or {}
+        self.ip: Optional[str] = ip
+
+        # values to be populated during generate_config calls
+        # and then used in _run_cephadm
+        self.final_config: Dict[str, Any] = {}
+        self.deps: List[str] = []
+
+        self.rank: Optional[int] = rank
+        self.rank_generation: Optional[int] = rank_generation
+
+        self.extra_container_args = extra_container_args
+        self.extra_entrypoint_args = extra_entrypoint_args
+
+    def name(self) -> str:
+        return '%s.%s' % (self.daemon_type, self.daemon_id)
+
+    def entity_name(self) -> str:
+        return get_auth_entity(self.daemon_type, self.daemon_id, host=self.host)
+
+    def config_get_files(self) -> Dict[str, Any]:
+        files = self.extra_files
+        if self.ceph_conf:
+            files['config'] = self.ceph_conf
+
+        return files
+
+    @staticmethod
+    def from_daemon_description(dd: DaemonDescription) -> 'CephadmDaemonDeploySpec':
+        assert dd.hostname
+        assert dd.daemon_id
+        assert dd.daemon_type
+        return CephadmDaemonDeploySpec(
+            host=dd.hostname,
+            daemon_id=dd.daemon_id,
+            daemon_type=dd.daemon_type,
+            service_name=dd.service_name(),
+            ip=dd.ip,
+            ports=dd.ports,
+            rank=dd.rank,
+            rank_generation=dd.rank_generation,
+            extra_container_args=dd.extra_container_args,
+            extra_entrypoint_args=dd.extra_entrypoint_args,
+        )
+
+    def to_daemon_description(self, status: DaemonDescriptionStatus, status_desc: str) -> DaemonDescription:
+        return DaemonDescription(
+            daemon_type=self.daemon_type,
+            daemon_id=self.daemon_id,
+            service_name=self.service_name,
+            hostname=self.host,
+            status=status,
+            status_desc=status_desc,
+            ip=self.ip,
+            ports=self.ports,
+            rank=self.rank,
+            rank_generation=self.rank_generation,
+            extra_container_args=cast(GeneralArgList, self.extra_container_args),
+            extra_entrypoint_args=cast(GeneralArgList, self.extra_entrypoint_args),
+        )
+
+    @property
+    def extra_args(self) -> List[str]:
+        return []
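A deploy spec ties together most of what the deploy path needs; a small illustration of constructing one by hand and the names derived from it (the host, id, and service values are made up):

```python
spec = CephadmDaemonDeploySpec(
    host='host1',
    daemon_id='foo',
    service_name='rgw.myrealm',  # daemon_type is derived from the first dot-component
)
assert spec.daemon_type == 'rgw'
assert spec.name() == 'rgw.foo'
assert spec.entity_name() == 'client.rgw.foo'
```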
+ """ + return False + + def fence_old_ranks(self, + spec: ServiceSpec, + rank_map: Dict[int, Dict[int, Optional[str]]], + num_ranks: int) -> None: + assert False + + def make_daemon_spec( + self, + host: str, + daemon_id: str, + network: str, + spec: ServiceSpecs, + daemon_type: Optional[str] = None, + ports: Optional[List[int]] = None, + ip: Optional[str] = None, + rank: Optional[int] = None, + rank_generation: Optional[int] = None, + ) -> CephadmDaemonDeploySpec: + return CephadmDaemonDeploySpec( + host=host, + daemon_id=daemon_id, + service_name=spec.service_name(), + network=network, + daemon_type=daemon_type, + ports=ports, + ip=ip, + rank=rank, + rank_generation=rank_generation, + extra_container_args=spec.extra_container_args if hasattr( + spec, 'extra_container_args') else None, + extra_entrypoint_args=spec.extra_entrypoint_args if hasattr( + spec, 'extra_entrypoint_args') else None, + ) + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + raise NotImplementedError() + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + raise NotImplementedError() + + def config(self, spec: ServiceSpec) -> None: + """ + Configure the cluster for this service. Only called *once* per + service apply. Not for every daemon. + """ + pass + + def daemon_check_post(self, daemon_descrs: List[DaemonDescription]) -> None: + """The post actions needed to be done after daemons are checked""" + if self.mgr.config_dashboard: + if 'dashboard' in self.mgr.get('mgr_map')['modules']: + self.config_dashboard(daemon_descrs) + else: + logger.debug('Dashboard is not enabled. Skip configuration.') + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + """Config dashboard settings.""" + raise NotImplementedError() + + def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: + # if this is called for a service type where it hasn't explicitly been + # defined, return empty Daemon Desc + return DaemonDescription() + + def get_keyring_with_caps(self, entity: AuthEntity, caps: List[str]) -> str: + ret, keyring, err = self.mgr.mon_command({ + 'prefix': 'auth get-or-create', + 'entity': entity, + 'caps': caps, + }) + if err: + ret, out, err = self.mgr.mon_command({ + 'prefix': 'auth caps', + 'entity': entity, + 'caps': caps, + }) + if err: + self.mgr.log.warning(f"Unable to update caps for {entity}") + + # get keyring anyway + ret, keyring, err = self.mgr.mon_command({ + 'prefix': 'auth get', + 'entity': entity, + }) + if err: + raise OrchestratorError(f"Unable to fetch keyring for {entity}: {err}") + + # strip down keyring + # - don't include caps (auth get includes them; get-or-create does not) + # - use pending key if present + key = None + for line in keyring.splitlines(): + if ' = ' not in line: + continue + line = line.strip() + (ls, rs) = line.split(' = ', 1) + if ls == 'key' and not key: + key = rs + if ls == 'pending key': + key = rs + keyring = f'[{entity}]\nkey = {key}\n' + return keyring + + def _inventory_get_fqdn(self, hostname: str) -> str: + """Get a host's FQDN with its hostname. + + If the FQDN can't be resolved, the address from the inventory will + be returned instead. + """ + addr = self.mgr.inventory.get_addr(hostname) + return socket.getfqdn(addr) + + def _set_service_url_on_dashboard(self, + service_name: str, + get_mon_cmd: str, + set_mon_cmd: str, + service_url: str) -> None: + """A helper to get and set service_url via Dashboard's MON command. 
+    def _inventory_get_fqdn(self, hostname: str) -> str:
+        """Get a host's FQDN from its hostname.
+
+        If the FQDN can't be resolved, the address from the inventory will
+        be returned instead.
+        """
+        addr = self.mgr.inventory.get_addr(hostname)
+        return socket.getfqdn(addr)
+
+    def _set_service_url_on_dashboard(self,
+                                      service_name: str,
+                                      get_mon_cmd: str,
+                                      set_mon_cmd: str,
+                                      service_url: str) -> None:
+        """A helper to get and set a service_url via Dashboard MON commands.
+
+        If the result of get_mon_cmd differs from service_url, set_mon_cmd will
+        be sent to set the service_url.
+        """
+        def get_set_cmd_dicts(out: str) -> List[dict]:
+            cmd_dict = {
+                'prefix': set_mon_cmd,
+                'value': service_url
+            }
+            return [cmd_dict] if service_url != out else []
+
+        self._check_and_set_dashboard(
+            service_name=service_name,
+            get_cmd=get_mon_cmd,
+            get_set_cmd_dicts=get_set_cmd_dicts
+        )
+
+    def _check_and_set_dashboard(self,
+                                 service_name: str,
+                                 get_cmd: str,
+                                 get_set_cmd_dicts: Callable[[str], List[dict]]) -> None:
+        """A helper to set configs in the Dashboard.
+
+        The method is useful for the following pattern:
+        - Get a config from the Dashboard by using a Dashboard command, e.g. the
+          current iSCSI gateways.
+        - Parse or deserialize the previous output, e.g. when the Dashboard command
+          returns a JSON string.
+        - Determine whether the config needs to be updated. NOTE: This step is
+          important, because if a Dashboard command modifies the Ceph config,
+          cephadm's config_notify() is called, which kicks the serve() loop, and
+          the logic using this method is likely to be called again. A config
+          should be updated only when needed.
+        - Update the config in the Dashboard by using a Dashboard command.
+
+        :param service_name: the service name to be used for logging
+        :type service_name: str
+        :param get_cmd: Dashboard command prefix to get config. e.g. dashboard get-grafana-api-url
+        :type get_cmd: str
+        :param get_set_cmd_dicts: function to create a list, where each item is a command dictionary.
+               e.g.
+               [
+                   {
+                       'prefix': 'dashboard iscsi-gateway-add',
+                       'service_url': 'http://admin:admin@aaa:5000',
+                       'name': 'aaa'
+                   },
+                   {
+                       'prefix': 'dashboard iscsi-gateway-add',
+                       'service_url': 'http://admin:admin@bbb:5000',
+                       'name': 'bbb'
+                   }
+               ]
+               The function should return an empty list if no command needs to be sent.
+        :type get_set_cmd_dicts: Callable[[str], List[dict]]
+        """
+
+        try:
+            _, out, _ = self.mgr.check_mon_command({
+                'prefix': get_cmd
+            })
+        except MonCommandFailed as e:
+            logger.warning('Failed to get Dashboard config for %s: %s', service_name, e)
+            return
+        cmd_dicts = get_set_cmd_dicts(out.strip())
+        for cmd_dict in list(cmd_dicts):
+            try:
+                inbuf = cmd_dict.pop('inbuf', None)
+                _, out, _ = self.mgr.check_mon_command(cmd_dict, inbuf)
+            except MonCommandFailed as e:
+                logger.warning('Failed to set Dashboard config for %s: %s', service_name, e)
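Concretely, the helper pair above turns "keep a dashboard setting in sync" into a single call; a hedged sketch of how a monitoring service might use it (the command names follow the docstring's get-grafana-api-url example, and the URL is invented):

```python
def config_dashboard(self, daemon_descrs):
    # compares the current value of 'dashboard get-grafana-api-url' with the
    # desired URL and issues 'dashboard set-grafana-api-url' only on a mismatch
    self._set_service_url_on_dashboard(
        'Grafana',
        'dashboard get-grafana-api-url',
        'dashboard set-grafana-api-url',
        'https://grafana.example.com:3000',
    )
```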
+    def ok_to_stop_osd(
+            self,
+            osds: List[str],
+            known: Optional[List[str]] = None,  # output argument
+            force: bool = False) -> HandleCommandResult:
+        r = HandleCommandResult(*self.mgr.mon_command({
+            'prefix': "osd ok-to-stop",
+            'ids': osds,
+            'max': 16,
+        }))
+        j = None
+        try:
+            j = json.loads(r.stdout)
+        except json.decoder.JSONDecodeError:
+            self.mgr.log.warning("osd ok-to-stop didn't return a structured result")
+            raise
+        if r.retval:
+            return r
+        if known is not None and j and j.get('ok_to_stop'):
+            self.mgr.log.debug(f"got {j}")
+            known.extend([f'osd.{x}' for x in j.get('osds', [])])
+        return HandleCommandResult(
+            0,
+            f'{",".join(["osd.%s" % o for o in osds])} {"is" if len(osds) == 1 else "are"} safe to restart',
+            ''
+        )
+
+    def ok_to_stop(
+            self,
+            daemon_ids: List[str],
+            force: bool = False,
+            known: Optional[List[str]] = None  # output argument
+    ) -> HandleCommandResult:
+        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
+        out = f'It appears safe to stop {",".join(names)}'
+        err = f'It is NOT safe to stop {",".join(names)} at this time'
+
+        if self.TYPE not in ['mon', 'osd', 'mds']:
+            logger.debug(out)
+            return HandleCommandResult(0, out)
+
+        if self.TYPE == 'osd':
+            return self.ok_to_stop_osd(daemon_ids, known, force)
+
+        r = HandleCommandResult(*self.mgr.mon_command({
+            'prefix': f'{self.TYPE} ok-to-stop',
+            'ids': daemon_ids,
+        }))
+
+        if r.retval:
+            err = f'{err}: {r.stderr}' if r.stderr else err
+            logger.debug(err)
+            return HandleCommandResult(r.retval, r.stdout, err)
+
+        out = f'{out}: {r.stdout}' if r.stdout else out
+        logger.debug(out)
+        return HandleCommandResult(r.retval, out, r.stderr)
+
+    def _enough_daemons_to_stop(self, daemon_type: str, daemon_ids: List[str], service: str, low_limit: int, alert: bool = False) -> Tuple[bool, str]:
+        # Provide a warning about whether it is possible to stop daemons in a service
+        names = [f'{daemon_type}.{d_id}' for d_id in daemon_ids]
+        number_of_running_daemons = len(
+            [daemon
+             for daemon in self.mgr.cache.get_daemons_by_type(daemon_type)
+             if daemon.status == DaemonDescriptionStatus.running])
+        if (number_of_running_daemons - len(daemon_ids)) >= low_limit:
+            return False, f'It is presumed safe to stop {names}'
+
+        num_daemons_left = number_of_running_daemons - len(daemon_ids)
+
+        def plural(count: int) -> str:
+            return 'daemon' if count == 1 else 'daemons'
+
+        left_count = "no" if num_daemons_left == 0 else num_daemons_left
+
+        if alert:
+            out = (f'ALERT: Cannot stop {names} in {service} service. '
+                   f'Not enough remaining {service} daemons. '
+                   f'Please deploy at least {low_limit + 1} {service} daemons before stopping {names}. ')
+        else:
+            out = (f'WARNING: Stopping {len(daemon_ids)} out of {number_of_running_daemons} daemons in {service} service. '
+                   f'Service will not be operational with {left_count} {plural(num_daemons_left)} left. '
+                   f'At least {low_limit} {plural(low_limit)} must be running to guarantee service. 
') + return True, out + + def pre_remove(self, daemon: DaemonDescription) -> None: + """ + Called before the daemon is removed. + """ + assert daemon.daemon_type is not None + assert self.TYPE == daemon_type_to_service(daemon.daemon_type) + logger.debug(f'Pre remove daemon {self.TYPE}.{daemon.daemon_id}') + + def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None: + """ + Called after the daemon is removed. + """ + assert daemon.daemon_type is not None + assert self.TYPE == daemon_type_to_service(daemon.daemon_type) + logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}') + + def purge(self, service_name: str) -> None: + """Called to carry out any purge tasks following service removal""" + logger.debug(f'Purge called for {self.TYPE} - no action taken') + + +class CephService(CephadmService): + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + # Ceph.daemons (mon, mgr, mds, osd, etc) + cephadm_config = self.get_config_and_keyring( + daemon_spec.daemon_type, + daemon_spec.daemon_id, + host=daemon_spec.host, + keyring=daemon_spec.keyring, + extra_ceph_config=daemon_spec.ceph_conf) + + if daemon_spec.config_get_files(): + cephadm_config.update({'files': daemon_spec.config_get_files()}) + + return cephadm_config, [] + + def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None: + super().post_remove(daemon, is_failed_deploy=is_failed_deploy) + self.remove_keyring(daemon) + + def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity: + return get_auth_entity(self.TYPE, daemon_id, host=host) + + def get_config_and_keyring(self, + daemon_type: str, + daemon_id: str, + host: str, + keyring: Optional[str] = None, + extra_ceph_config: Optional[str] = None + ) -> Dict[str, Any]: + # keyring + if not keyring: + entity: AuthEntity = self.get_auth_entity(daemon_id, host=host) + ret, keyring, err = self.mgr.check_mon_command({ + 'prefix': 'auth get', + 'entity': entity, + }) + config = self.mgr.get_minimal_ceph_conf() + + if extra_ceph_config: + config += extra_ceph_config + + return { + 'config': config, + 'keyring': keyring, + } + + def remove_keyring(self, daemon: DaemonDescription) -> None: + assert daemon.daemon_id is not None + assert daemon.hostname is not None + daemon_id: str = daemon.daemon_id + host: str = daemon.hostname + + assert daemon.daemon_type != 'mon' + + entity = self.get_auth_entity(daemon_id, host=host) + + logger.info(f'Removing key for {entity}') + ret, out, err = self.mgr.mon_command({ + 'prefix': 'auth rm', + 'entity': entity, + }) + + +class MonService(CephService): + TYPE = 'mon' + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + """ + Create a new monitor on the given host. + """ + assert self.TYPE == daemon_spec.daemon_type + name, _, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network + + # get mon. 
key + ret, keyring, err = self.mgr.check_mon_command({ + 'prefix': 'auth get', + 'entity': daemon_spec.entity_name(), + }) + + extra_config = '[mon.%s]\n' % name + if network: + # infer whether this is a CIDR network, addrvec, or plain IP + if '/' in network: + extra_config += 'public network = %s\n' % network + elif network.startswith('[v') and network.endswith(']'): + extra_config += 'public addrv = %s\n' % network + elif is_ipv6(network): + extra_config += 'public addr = %s\n' % unwrap_ipv6(network) + elif ':' not in network: + extra_config += 'public addr = %s\n' % network + else: + raise OrchestratorError( + 'Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network) + else: + # try to get the public_network from the config + ret, network, err = self.mgr.check_mon_command({ + 'prefix': 'config get', + 'who': 'mon', + 'key': 'public_network', + }) + network = network.strip() if network else network + if not network: + raise OrchestratorError( + 'Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP') + if '/' not in network: + raise OrchestratorError( + 'public_network is set but does not look like a CIDR network: \'%s\'' % network) + extra_config += 'public network = %s\n' % network + + daemon_spec.ceph_conf = extra_config + daemon_spec.keyring = keyring + + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + def config(self, spec: ServiceSpec) -> None: + assert self.TYPE == spec.service_type + self.set_crush_locations(self.mgr.cache.get_daemons_by_type('mon'), spec) + + def _get_quorum_status(self) -> Dict[Any, Any]: + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'quorum_status', + }) + try: + j = json.loads(out) + except Exception as e: + raise OrchestratorError(f'failed to parse mon quorum status: {e}') + return j + + def _check_safe_to_destroy(self, mon_id: str) -> None: + quorum_status = self._get_quorum_status() + mons = [m['name'] for m in quorum_status['monmap']['mons']] + if mon_id not in mons: + logger.info('Safe to remove mon.%s: not in monmap (%s)' % ( + mon_id, mons)) + return + new_mons = [m for m in mons if m != mon_id] + new_quorum = [m for m in quorum_status['quorum_names'] if m != mon_id] + if len(new_quorum) > len(new_mons) / 2: + logger.info('Safe to remove mon.%s: new quorum should be %s (from %s)' % + (mon_id, new_quorum, new_mons)) + return + raise OrchestratorError( + 'Removing %s would break mon quorum (new quorum %s, new mons %s)' % (mon_id, new_quorum, new_mons)) + + def pre_remove(self, daemon: DaemonDescription) -> None: + super().pre_remove(daemon) + + assert daemon.daemon_id is not None + daemon_id: str = daemon.daemon_id + self._check_safe_to_destroy(daemon_id) + + # remove mon from quorum before we destroy the daemon + logger.info('Removing monitor %s from monmap...' % daemon_id) + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'mon rm', + 'name': daemon_id, + }) + + def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None: + # Do not remove the mon keyring. + # super().post_remove(daemon) + pass + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + daemon_spec.final_config, daemon_spec.deps = super().generate_config(daemon_spec) + + # realistically, we expect there to always be a mon spec + # in a real deployment, but the way teuthology deploys some daemons + # it's possible there might not be. 
For that reason we need to
+        # verify the service is present in the spec store.
+        if daemon_spec.service_name in self.mgr.spec_store:
+            mon_spec = cast(MONSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
+            if mon_spec.crush_locations:
+                if daemon_spec.host in mon_spec.crush_locations:
+                    # the --crush-location flag only supports a single bucket=loc pair so
+                    # others will have to be handled later. The idea is to set the flag
+                    # for the first bucket=loc pair in the list in order to facilitate
+                    # replacing a tiebreaker mon (https://docs.ceph.com/en/quincy/rados/operations/stretch-mode/#other-commands)
+                    c_loc = mon_spec.crush_locations[daemon_spec.host][0]
+                    daemon_spec.final_config['crush_location'] = c_loc
+
+        return daemon_spec.final_config, daemon_spec.deps
+
+    def set_crush_locations(self, daemon_descrs: List[DaemonDescription], spec: ServiceSpec) -> None:
+        logger.debug('Setting mon crush locations from spec')
+        if not daemon_descrs:
+            return
+        assert self.TYPE == spec.service_type
+        mon_spec = cast(MONSpec, spec)
+
+        if not mon_spec.crush_locations:
+            return
+
+        quorum_status = self._get_quorum_status()
+        mons_in_monmap = [m['name'] for m in quorum_status['monmap']['mons']]
+        for dd in daemon_descrs:
+            assert dd.daemon_id is not None
+            assert dd.hostname is not None
+            if dd.hostname not in mon_spec.crush_locations:
+                continue
+            if dd.daemon_id not in mons_in_monmap:
+                continue
+            # expected format for crush_locations from the quorum status is
+            # {bucket1=loc1,bucket2=loc2} etc. for the number of bucket=loc pairs
+            try:
+                current_crush_locs = [m['crush_location'] for m in quorum_status['monmap']['mons'] if m['name'] == dd.daemon_id][0]
+            except (KeyError, IndexError) as e:
+                logger.warning(f'Failed setting crush location for mon {dd.daemon_id}: {e}\n'
+                               'Mon may not have a monmap entry yet. Try re-applying mon spec once mon is confirmed up.')
+                # without a readable current location there is nothing to compare; skip this mon
+                continue
+            desired_crush_locs = '{' + ','.join(mon_spec.crush_locations[dd.hostname]) + '}'
+            logger.debug(f'Found spec defined crush locations for mon on {dd.hostname}: {desired_crush_locs}')
+            logger.debug(f'Current crush locations for mon on {dd.hostname}: {current_crush_locs}')
+            if current_crush_locs != desired_crush_locs:
+                logger.info(f'Setting crush location for mon {dd.daemon_id} to {desired_crush_locs}')
+                try:
+                    ret, out, err = self.mgr.check_mon_command({
+                        'prefix': 'mon set_location',
+                        'name': dd.daemon_id,
+                        'args': mon_spec.crush_locations[dd.hostname]
+                    })
+                except Exception as e:
+                    logger.error(f'Failed setting crush location for mon {dd.daemon_id}: {e}')
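For reference, the crush_locations handling above starts from a MON spec; a rough sketch of the spec shape and the command it leads to (hostnames and buckets are invented, and the exact MONSpec constructor arguments are an assumption):

```python
from ceph.deployment.service_spec import MONSpec

spec = MONSpec(
    service_type='mon',
    crush_locations={
        'host1': ['datacenter=site1', 'rack=r1'],  # the first pair also feeds --crush-location
        'host2': ['datacenter=site2'],
    },
)
# for the mon on host1, set_crush_locations would end up issuing roughly:
#   ceph mon set_location <mon_id> datacenter=site1 rack=r1
```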
+
+
+class MgrService(CephService):
+    TYPE = 'mgr'
+
+    def allow_colo(self) -> bool:
+        if self.mgr.get_ceph_option('mgr_standby_modules'):
+            # traditional mgr mode: standby daemons' modules listen on
+            # ports and redirect to the primary. we must not schedule
+            # multiple mgrs on the same host or else ports will
+            # conflict.
+            return False
+        else:
+            # standby daemons do nothing, and therefore port conflicts
+            # are not a concern.
+            return True
+
+    def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
+        """
+        Create a new manager instance on a host.
+        """
+        assert self.TYPE == daemon_spec.daemon_type
+        mgr_id, _ = daemon_spec.daemon_id, daemon_spec.host
+
+        # get mgr. key
+        keyring = self.get_keyring_with_caps(self.get_auth_entity(mgr_id),
+                                             ['mon', 'profile mgr',
+                                              'osd', 'allow *',
+                                              'mds', 'allow *'])
+
+        # Retrieve the ports used by manager modules.
+        # With several manager daemons running on different hosts, the user may
+        # have configured a different dashboard port on each server. If that is
+        # the case, the only dashboard port opened here is the one currently
+        # reported by the active mgr.
+        ports = []
+        ret, mgr_services, err = self.mgr.check_mon_command({
+            'prefix': 'mgr services',
+        })
+        if mgr_services:
+            mgr_endpoints = json.loads(mgr_services)
+            for end_point in mgr_endpoints.values():
+                port = re.search(r'\:\d+\/', end_point)
+                if port:
+                    ports.append(int(port[0][1:-1]))
+
+        if ports:
+            daemon_spec.ports = ports
+
+        daemon_spec.ports.append(self.mgr.service_discovery_port)
+        daemon_spec.keyring = keyring
+
+        daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+
+        return daemon_spec
+
+    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
+        for daemon in daemon_descrs:
+            assert daemon.daemon_type is not None
+            assert daemon.daemon_id is not None
+            if self.mgr.daemon_is_self(daemon.daemon_type, daemon.daemon_id):
+                return daemon
+        # if no active mgr found, return an empty DaemonDescription
+        return DaemonDescription()
+
+    def fail_over(self) -> None:
+        # this has been seen to sometimes transiently fail even when there are multiple
+        # mgr daemons. As long as there are multiple known mgr daemons, we should retry.
+        class NoStandbyError(OrchestratorError):
+            pass
+        no_standby_exc = NoStandbyError('Need standby mgr daemon', event_kind_subject=(
+            'daemon', 'mgr' + self.mgr.get_mgr_id()))
+        for sleep_secs in [2, 8, 15]:
+            try:
+                if not self.mgr_map_has_standby():
+                    raise no_standby_exc
+                self.mgr.events.for_daemon('mgr' + self.mgr.get_mgr_id(),
+                                           'INFO', 'Failing over to other MGR')
+                logger.info('Failing over to other MGR')
+
+                # fail over
+                ret, out, err = self.mgr.check_mon_command({
+                    'prefix': 'mgr fail',
+                    'who': self.mgr.get_mgr_id(),
+                })
+                return
+            except NoStandbyError:
+                logger.info(
+                    f'Failed to find standby mgr for failover. Retrying in {sleep_secs} seconds')
+                time.sleep(sleep_secs)
+        raise no_standby_exc
+
+    def mgr_map_has_standby(self) -> bool:
+        """
+        This is a bit safer than asking our inventory. 
If the mgr joined the mgr map, + we know it joined the cluster + """ + mgr_map = self.mgr.get('mgr_map') + num = len(mgr_map.get('standbys')) + return bool(num) + + def ok_to_stop( + self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None # output argument + ) -> HandleCommandResult: + # ok to stop if there is more than 1 mgr and not trying to stop the active mgr + + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Mgr', 1, True) + if warn: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + mgr_daemons = self.mgr.cache.get_daemons_by_type(self.TYPE) + active = self.get_active_daemon(mgr_daemons).daemon_id + if active in daemon_ids: + warn_message = 'ALERT: Cannot stop active Mgr daemon, Please switch active Mgrs with \'ceph mgr fail %s\'' % active + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + return HandleCommandResult(0, warn_message, '') + + +class MdsService(CephService): + TYPE = 'mds' + + def allow_colo(self) -> bool: + return True + + def config(self, spec: ServiceSpec) -> None: + assert self.TYPE == spec.service_type + assert spec.service_id + + # ensure mds_join_fs is set for these daemons + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config set', + 'who': 'mds.' + spec.service_id, + 'name': 'mds_join_fs', + 'value': spec.service_id, + }) + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + mds_id, _ = daemon_spec.daemon_id, daemon_spec.host + + # get mds. key + keyring = self.get_keyring_with_caps(self.get_auth_entity(mds_id), + ['mon', 'profile mds', + 'osd', 'allow rw tag cephfs *=*', + 'mds', 'allow']) + daemon_spec.keyring = keyring + + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: + active_mds_strs = list() + for fs in self.mgr.get('fs_map')['filesystems']: + mds_map = fs['mdsmap'] + if mds_map is not None: + for mds_id, mds_status in mds_map['info'].items(): + if mds_status['state'] == 'up:active': + active_mds_strs.append(mds_status['name']) + if len(active_mds_strs) != 0: + for daemon in daemon_descrs: + if daemon.daemon_id in active_mds_strs: + return daemon + # if no mds found, return empty Daemon Desc + return DaemonDescription() + + def purge(self, service_name: str) -> None: + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': service_name, + 'name': 'mds_join_fs', + }) + + +class RgwService(CephService): + TYPE = 'rgw' + + def allow_colo(self) -> bool: + return True + + def config(self, spec: RGWSpec) -> None: # type: ignore + assert self.TYPE == spec.service_type + + # set rgw_realm rgw_zonegroup and rgw_zone, if present + if spec.rgw_realm: + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config set', + 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}", + 'name': 'rgw_realm', + 'value': spec.rgw_realm, + }) + if spec.rgw_zonegroup: + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config set', + 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}", + 'name': 'rgw_zonegroup', + 'value': spec.rgw_zonegroup, + }) + if spec.rgw_zone: + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config set', + 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}", + 'name': 'rgw_zone', + 'value': spec.rgw_zone, + }) + + if spec.rgw_frontend_ssl_certificate: + 
if isinstance(spec.rgw_frontend_ssl_certificate, list): + cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate) + elif isinstance(spec.rgw_frontend_ssl_certificate, str): + cert_data = spec.rgw_frontend_ssl_certificate + else: + raise OrchestratorError( + 'Invalid rgw_frontend_ssl_certificate: %s' + % spec.rgw_frontend_ssl_certificate) + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config-key set', + 'key': f'rgw/cert/{spec.service_name()}', + 'val': cert_data, + }) + + # TODO: fail, if we don't have a spec + logger.info('Saving service %s spec with placement %s' % ( + spec.service_name(), spec.placement.pretty_str())) + self.mgr.spec_store.save(spec) + self.mgr.trigger_connect_dashboard_rgw() + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + rgw_id, _ = daemon_spec.daemon_id, daemon_spec.host + spec = cast(RGWSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + + keyring = self.get_keyring(rgw_id) + + if daemon_spec.ports: + port = daemon_spec.ports[0] + else: + # this is a redeploy of older instance that doesn't have an explicitly + # assigned port, in which case we can assume there is only 1 per host + # and it matches the spec. + port = spec.get_port() + + # configure frontend + args = [] + ftype = spec.rgw_frontend_type or "beast" + if ftype == 'beast': + if spec.ssl: + if daemon_spec.ip: + args.append( + f"ssl_endpoint={build_url(host=daemon_spec.ip, port=port).lstrip('/')}") + else: + args.append(f"ssl_port={port}") + args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}") + else: + if daemon_spec.ip: + args.append(f"endpoint={build_url(host=daemon_spec.ip, port=port).lstrip('/')}") + else: + args.append(f"port={port}") + elif ftype == 'civetweb': + if spec.ssl: + if daemon_spec.ip: + # note the 's' suffix on port + args.append(f"port={build_url(host=daemon_spec.ip, port=port).lstrip('/')}s") + else: + args.append(f"port={port}s") # note the 's' suffix on port + args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}") + else: + if daemon_spec.ip: + args.append(f"port={build_url(host=daemon_spec.ip, port=port).lstrip('/')}") + else: + args.append(f"port={port}") + else: + raise OrchestratorError(f'Invalid rgw_frontend_type parameter: {ftype}. 
Valid values are: beast, civetweb.') + + if spec.rgw_frontend_extra_args is not None: + args.extend(spec.rgw_frontend_extra_args) + + frontend = f'{ftype} {" ".join(args)}' + + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config set', + 'who': utils.name_to_config_section(daemon_spec.name()), + 'name': 'rgw_frontends', + 'value': frontend + }) + + daemon_spec.keyring = keyring + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + def get_keyring(self, rgw_id: str) -> str: + keyring = self.get_keyring_with_caps(self.get_auth_entity(rgw_id), + ['mon', 'allow *', + 'mgr', 'allow rw', + 'osd', 'allow rwx tag rgw *=*']) + return keyring + + def purge(self, service_name: str) -> None: + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': utils.name_to_config_section(service_name), + 'name': 'rgw_realm', + }) + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': utils.name_to_config_section(service_name), + 'name': 'rgw_zone', + }) + self.mgr.check_mon_command({ + 'prefix': 'config-key rm', + 'key': f'rgw/cert/{service_name}', + }) + self.mgr.trigger_connect_dashboard_rgw() + + def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None: + super().post_remove(daemon, is_failed_deploy=is_failed_deploy) + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': utils.name_to_config_section(daemon.name()), + 'name': 'rgw_frontends', + }) + + def ok_to_stop( + self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None # output argument + ) -> HandleCommandResult: + # if load balancer (ingress) is present block if only 1 daemon up otherwise ok + # if no load balancer, warn if > 1 daemon, block if only 1 daemon + def ingress_present() -> bool: + running_ingress_daemons = [ + daemon for daemon in self.mgr.cache.get_daemons_by_type('ingress') if daemon.status == 1] + running_haproxy_daemons = [ + daemon for daemon in running_ingress_daemons if daemon.daemon_type == 'haproxy'] + running_keepalived_daemons = [ + daemon for daemon in running_ingress_daemons if daemon.daemon_type == 'keepalived'] + # check that there is at least one haproxy and keepalived daemon running + if running_haproxy_daemons and running_keepalived_daemons: + return True + return False + + # if only 1 rgw, alert user (this is not passable with --force) + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'RGW', 1, True) + if warn: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + # if reached here, there is > 1 rgw daemon. + # Say okay if load balancer present or force flag set + if ingress_present() or force: + return HandleCommandResult(0, warn_message, '') + + # if reached here, > 1 RGW daemon, no load balancer and no force flag. + # Provide warning + warn_message = "WARNING: Removing RGW daemons can cause clients to lose connectivity. 
" + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + self.mgr.trigger_connect_dashboard_rgw() + + +class RbdMirrorService(CephService): + TYPE = 'rbd-mirror' + + def allow_colo(self) -> bool: + return True + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_id, _ = daemon_spec.daemon_id, daemon_spec.host + + keyring = self.get_keyring_with_caps(self.get_auth_entity(daemon_id), + ['mon', 'profile rbd-mirror', + 'osd', 'profile rbd']) + + daemon_spec.keyring = keyring + + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + def ok_to_stop( + self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None # output argument + ) -> HandleCommandResult: + # if only 1 rbd-mirror, alert user (this is not passable with --force) + warn, warn_message = self._enough_daemons_to_stop( + self.TYPE, daemon_ids, 'Rbdmirror', 1, True) + if warn: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + return HandleCommandResult(0, warn_message, '') + + +class CrashService(CephService): + TYPE = 'crash' + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_id, host = daemon_spec.daemon_id, daemon_spec.host + + keyring = self.get_keyring_with_caps(self.get_auth_entity(daemon_id, host=host), + ['mon', 'profile crash', + 'mgr', 'profile crash']) + + daemon_spec.keyring = keyring + + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + +class CephExporterService(CephService): + TYPE = 'ceph-exporter' + DEFAULT_SERVICE_PORT = 9926 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + spec = cast(CephExporterSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + keyring = self.get_keyring_with_caps(self.get_auth_entity(daemon_spec.daemon_id), + ['mon', 'profile ceph-exporter', + 'mon', 'allow r', + 'mgr', 'allow r', + 'osd', 'allow r']) + exporter_config = {} + if spec.sock_dir: + exporter_config.update({'sock-dir': spec.sock_dir}) + if spec.port: + exporter_config.update({'port': f'{spec.port}'}) + if spec.prio_limit is not None: + exporter_config.update({'prio-limit': f'{spec.prio_limit}'}) + if spec.stats_period: + exporter_config.update({'stats-period': f'{spec.stats_period}'}) + + daemon_spec.keyring = keyring + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + daemon_spec.final_config = merge_dicts(daemon_spec.final_config, exporter_config) + return daemon_spec + + +class CephfsMirrorService(CephService): + TYPE = 'cephfs-mirror' + + def config(self, spec: ServiceSpec) -> None: + # make sure mirroring module is enabled + mgr_map = self.mgr.get('mgr_map') + mod_name = 'mirroring' + if mod_name not in mgr_map.get('services', {}): + self.mgr.check_mon_command({ + 'prefix': 'mgr module enable', + 'module': mod_name + }) + # we shouldn't get here (mon will tell the mgr to respawn), but no + # harm done if we do. 
+ + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + + ret, keyring, err = self.mgr.check_mon_command({ + 'prefix': 'auth get-or-create', + 'entity': daemon_spec.entity_name(), + 'caps': ['mon', 'profile cephfs-mirror', + 'mds', 'allow r', + 'osd', 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*', + 'mgr', 'allow r'], + }) + + daemon_spec.keyring = keyring + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + +class CephadmAgent(CephService): + TYPE = 'agent' + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_id, host = daemon_spec.daemon_id, daemon_spec.host + + if not self.mgr.http_server.agent: + raise OrchestratorError('Cannot deploy agent before creating cephadm endpoint') + + keyring = self.get_keyring_with_caps(self.get_auth_entity(daemon_id, host=host), []) + daemon_spec.keyring = keyring + self.mgr.agent_cache.agent_keys[host] = keyring + + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + agent = self.mgr.http_server.agent + try: + assert agent + assert agent.ssl_certs.get_root_cert() + assert agent.server_port + except Exception: + raise OrchestratorError( + 'Cannot deploy agent daemons until cephadm endpoint has finished generating certs') + + cfg = {'target_ip': self.mgr.get_mgr_ip(), + 'target_port': agent.server_port, + 'refresh_period': self.mgr.agent_refresh_rate, + 'listener_port': self.mgr.agent_starting_port, + 'host': daemon_spec.host, + 'device_enhanced_scan': str(self.mgr.device_enhanced_scan)} + + listener_cert, listener_key = agent.ssl_certs.generate_cert(daemon_spec.host, self.mgr.inventory.get_addr(daemon_spec.host)) + config = { + 'agent.json': json.dumps(cfg), + 'keyring': daemon_spec.keyring, + 'root_cert.pem': agent.ssl_certs.get_root_cert(), + 'listener.crt': listener_cert, + 'listener.key': listener_key, + } + + return config, sorted([str(self.mgr.get_mgr_ip()), str(agent.server_port), + agent.ssl_certs.get_root_cert(), + str(self.mgr.get_module_option('device_enhanced_scan'))]) diff --git a/src/pybind/mgr/cephadm/services/container.py b/src/pybind/mgr/cephadm/services/container.py new file mode 100644 index 000000000..b9cdfad5e --- /dev/null +++ b/src/pybind/mgr/cephadm/services/container.py @@ -0,0 +1,29 @@ +import logging +from typing import List, Any, Tuple, Dict, cast + +from ceph.deployment.service_spec import CustomContainerSpec + +from .cephadmservice import CephadmService, CephadmDaemonDeploySpec + +logger = logging.getLogger(__name__) + + +class CustomContainerService(CephadmService): + TYPE = 'container' + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) \ + -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) \ + -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + deps: List[str] = [] + spec = cast(CustomContainerSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + config: Dict[str, Any] = spec.config_json() + logger.debug( + 'Generated configuration for \'%s\' service: config-json=%s, 
dependencies=%s' % + (self.TYPE, config, deps)) + return config, deps diff --git a/src/pybind/mgr/cephadm/services/ingress.py b/src/pybind/mgr/cephadm/services/ingress.py new file mode 100644 index 000000000..55be30454 --- /dev/null +++ b/src/pybind/mgr/cephadm/services/ingress.py @@ -0,0 +1,381 @@ +import ipaddress +import logging +import random +import string +from typing import List, Dict, Any, Tuple, cast, Optional + +from ceph.deployment.service_spec import ServiceSpec, IngressSpec +from mgr_util import build_url +from cephadm import utils +from orchestrator import OrchestratorError, DaemonDescription +from cephadm.services.cephadmservice import CephadmDaemonDeploySpec, CephService + +logger = logging.getLogger(__name__) + + +class IngressService(CephService): + TYPE = 'ingress' + MAX_KEEPALIVED_PASS_LEN = 8 + + def primary_daemon_type(self, spec: Optional[ServiceSpec] = None) -> str: + if spec: + ispec = cast(IngressSpec, spec) + # in keepalive only setups, we are only deploying keepalived, + # so that should be marked as the primary daemon type. Otherwise, + # we consider haproxy to be the primary. + if hasattr(spec, 'keepalive_only') and ispec.keepalive_only: + return 'keepalived' + return 'haproxy' + + def per_host_daemon_type(self, spec: Optional[ServiceSpec] = None) -> Optional[str]: + if spec: + ispec = cast(IngressSpec, spec) + # if we are using "keepalive_only" mode on this ingress service + # we are only deploying keepalived daemons, so there should + # only be a primary daemon type and the per host daemon type + # should be empty + if hasattr(spec, 'keepalive_only') and ispec.keepalive_only: + return None + return 'keepalived' + + def prepare_create( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> CephadmDaemonDeploySpec: + if daemon_spec.daemon_type == 'haproxy': + return self.haproxy_prepare_create(daemon_spec) + if daemon_spec.daemon_type == 'keepalived': + return self.keepalived_prepare_create(daemon_spec) + assert False, "unexpected daemon type" + + def generate_config( + self, + daemon_spec: CephadmDaemonDeploySpec + ) -> Tuple[Dict[str, Any], List[str]]: + if daemon_spec.daemon_type == 'haproxy': + return self.haproxy_generate_config(daemon_spec) + else: + return self.keepalived_generate_config(daemon_spec) + assert False, "unexpected daemon type" + + def haproxy_prepare_create( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> CephadmDaemonDeploySpec: + assert daemon_spec.daemon_type == 'haproxy' + + daemon_id = daemon_spec.daemon_id + host = daemon_spec.host + spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + + logger.debug('prepare_create haproxy.%s on host %s with spec %s' % ( + daemon_id, host, spec)) + + daemon_spec.final_config, daemon_spec.deps = self.haproxy_generate_config(daemon_spec) + + return daemon_spec + + def haproxy_generate_config( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> Tuple[Dict[str, Any], List[str]]: + spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + assert spec.backend_service + if spec.backend_service not in self.mgr.spec_store: + raise RuntimeError( + f'{spec.service_name()} backend service {spec.backend_service} does not exist') + backend_spec = self.mgr.spec_store[spec.backend_service].spec + daemons = self.mgr.cache.get_daemons_by_service(spec.backend_service) + deps = [d.name() for d in daemons] + + # generate password? 
+        pw_key = f'{spec.service_name()}/monitor_password'
+        password = self.mgr.get_store(pw_key)
+        if password is None:
+            if not spec.monitor_password:
+                password = ''.join(random.choice(string.ascii_lowercase)
+                                   for _ in range(self.MAX_KEEPALIVED_PASS_LEN))
+                self.mgr.set_store(pw_key, password)
+        else:
+            if spec.monitor_password:
+                self.mgr.set_store(pw_key, None)
+        if spec.monitor_password:
+            password = spec.monitor_password
+
+        if backend_spec.service_type == 'nfs':
+            mode = 'tcp'
+            # we need to get the nfs daemon with the highest rank_generation for
+            # each rank we are currently deploying for the haproxy config.
+            # for example, if we had three (rank, rank_generation) pairs of
+            # (0, 0), (0, 1), (1, 0) we would want the nfs daemons corresponding
+            # to (0, 1) and (1, 0) because those are the two with the highest
+            # rank_generation for the existing ranks (0 and 1, with the highest
+            # rank_generation for rank 0 being 1 and the highest rank_generation
+            # for rank 1 being 0)
+            ranked_daemons = [d for d in daemons if (d.rank is not None and d.rank_generation is not None)]
+            by_rank: Dict[int, DaemonDescription] = {}
+            for d in ranked_daemons:
+                # It doesn't seem like mypy can figure out that rank
+                # and rank_generation for both the daemon we're looping on
+                # and all those in by_rank cannot be None due to the filtering
+                # when creating the ranked_daemons list, which is why these
+                # seemingly unnecessary assertions are here.
+                assert d.rank is not None
+                if d.rank not in by_rank:
+                    by_rank[d.rank] = d
+                else:
+                    same_rank_nfs = by_rank[d.rank]
+                    assert d.rank_generation is not None
+                    assert same_rank_nfs.rank_generation is not None
+                    # if we have multiple daemons with the same rank, take the
+                    # one with the highest rank_generation
+                    if d.rank_generation > same_rank_nfs.rank_generation:
+                        by_rank[d.rank] = d
+            servers = []
+
+            # try to establish how many ranks we *should* have
+            num_ranks = backend_spec.placement.count
+            if not num_ranks:
+                num_ranks = 1 + max(by_rank.keys())
+
+            for rank in range(num_ranks):
+                if rank in by_rank:
+                    d = by_rank[rank]
+                    assert d.ports
+                    servers.append({
+                        'name': f"{spec.backend_service}.{rank}",
+                        'ip': d.ip or utils.resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
+                        'port': d.ports[0],
+                    })
+                else:
+                    # offline/missing server; leave rank in place
+                    servers.append({
+                        'name': f"{spec.backend_service}.{rank}",
+                        'ip': '0.0.0.0',
+                        'port': 0,
+                    })
+        else:
+            mode = 'http'
+            servers = [
+                {
+                    'name': d.name(),
+                    'ip': d.ip or utils.resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
+                    'port': d.ports[0],
+                } for d in daemons if d.ports
+            ]
+
+        host_ip = daemon_spec.ip or self.mgr.inventory.get_addr(daemon_spec.host)
+        server_opts = []
+        if spec.enable_haproxy_protocol:
+            server_opts.append("send-proxy-v2")
+        logger.debug("enabled default server opts: %r", server_opts)
+        ip = '*' if spec.virtual_ips_list else str(spec.virtual_ip).split('/')[0] or daemon_spec.ip or '*'
+        frontend_port = daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port
+        if ip != '*' and frontend_port:
+            daemon_spec.port_ips = {str(frontend_port): ip}
+        haproxy_conf = self.mgr.template.render(
+            'services/ingress/haproxy.cfg.j2',
+            {
+                'spec': spec,
+                'backend_spec': backend_spec,
+                'mode': mode,
+                'servers': servers,
+                'user': spec.monitor_user or 'admin',
+                'password': password,
+                'ip': ip,
+                'frontend_port': frontend_port,
+                'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port,
+                'local_host_ip': host_ip,
+                'default_server_opts': server_opts,
+            }
+        )
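The rank/rank_generation reduction above is subtle enough to warrant a standalone illustration. Below is a minimal, self-contained sketch of the same selection rule; `DD` is a hypothetical stand-in for `DaemonDescription` and is not part of the patch:

```python
from typing import Dict, List, NamedTuple, Optional


class DD(NamedTuple):
    # hypothetical stand-in for orchestrator.DaemonDescription
    name: str
    rank: Optional[int]
    rank_generation: Optional[int]


def newest_per_rank(daemons: List[DD]) -> Dict[int, DD]:
    """Keep only the daemon with the highest rank_generation for each rank."""
    by_rank: Dict[int, DD] = {}
    for d in daemons:
        if d.rank is None or d.rank_generation is None:
            continue  # ignore unranked daemons, as the filter above does
        cur = by_rank.get(d.rank)
        if cur is None or d.rank_generation > cur.rank_generation:
            by_rank[d.rank] = d
    return by_rank


# the (0, 0), (0, 1), (1, 0) example from the comment above:
ds = [DD('a', 0, 0), DD('b', 0, 1), DD('c', 1, 0)]
assert {r: d.name for r, d in newest_per_rank(ds).items()} == {0: 'b', 1: 'c'}
```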
+ config_files = { + 'files': { + "haproxy.cfg": haproxy_conf, + } + } + if spec.ssl_cert: + ssl_cert = spec.ssl_cert + if isinstance(ssl_cert, list): + ssl_cert = '\n'.join(ssl_cert) + config_files['files']['haproxy.pem'] = ssl_cert + + return config_files, sorted(deps) + + def keepalived_prepare_create( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> CephadmDaemonDeploySpec: + assert daemon_spec.daemon_type == 'keepalived' + + daemon_id = daemon_spec.daemon_id + host = daemon_spec.host + spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + + logger.debug('prepare_create keepalived.%s on host %s with spec %s' % ( + daemon_id, host, spec)) + + daemon_spec.final_config, daemon_spec.deps = self.keepalived_generate_config(daemon_spec) + + return daemon_spec + + def keepalived_generate_config( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> Tuple[Dict[str, Any], List[str]]: + spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + assert spec.backend_service + + # generate password? + pw_key = f'{spec.service_name()}/keepalived_password' + password = self.mgr.get_store(pw_key) + if password is None: + if not spec.keepalived_password: + password = ''.join(random.choice(string.ascii_lowercase) + for _ in range(self.MAX_KEEPALIVED_PASS_LEN)) + self.mgr.set_store(pw_key, password) + else: + if spec.keepalived_password: + self.mgr.set_store(pw_key, None) + if spec.keepalived_password: + password = spec.keepalived_password + + daemons = self.mgr.cache.get_daemons_by_service(spec.service_name()) + + if not daemons and not spec.keepalive_only: + raise OrchestratorError( + f'Failed to generate keepalived.conf: No daemons deployed for {spec.service_name()}') + + deps = sorted([d.name() for d in daemons if d.daemon_type == 'haproxy']) + + host = daemon_spec.host + hosts = sorted(list(set([host] + [str(d.hostname) for d in daemons]))) + + def _get_valid_interface_and_ip(vip: str, host: str) -> Tuple[str, str]: + # interface + bare_ip = ipaddress.ip_interface(vip).ip + host_ip = '' + interface = None + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): + interface = list(ifaces.keys())[0] + host_ip = ifaces[interface][0] + logger.info( + f'{bare_ip} is in {subnet} on {host} interface {interface}' + ) + break + # try to find interface by matching spec.virtual_interface_networks + if not interface and spec.virtual_interface_networks: + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if subnet in spec.virtual_interface_networks: + interface = list(ifaces.keys())[0] + host_ip = ifaces[interface][0] + logger.info( + f'{spec.virtual_ip} will be configured on {host} interface ' + f'{interface} (which is in subnet {subnet})' + ) + break + if not interface: + raise OrchestratorError( + f"Unable to identify interface for {spec.virtual_ip} on {host}" + ) + return interface, host_ip + + # script to monitor health + script = '/usr/bin/false' + for d in daemons: + if d.hostname == host: + if d.daemon_type == 'haproxy': + assert d.ports + port = d.ports[1] # monitoring port + host_ip = d.ip or self.mgr.inventory.get_addr(d.hostname) + script = f'/usr/bin/curl {build_url(scheme="http", host=host_ip, port=port)}/health' + assert script + + states = [] + priorities = [] + virtual_ips = [] + + # Set state and priority. Have one master for each VIP. Or at least the first one as master if only one VIP. 
+        if spec.virtual_ip:
+            virtual_ips.append(spec.virtual_ip)
+            if hosts[0] == host:
+                states.append('MASTER')
+                priorities.append(100)
+            else:
+                states.append('BACKUP')
+                priorities.append(90)
+
+        elif spec.virtual_ips_list:
+            virtual_ips = spec.virtual_ips_list
+            if len(virtual_ips) > len(hosts):
+                raise OrchestratorError(
+                    "Number of virtual IPs for ingress is greater than number of available hosts"
+                )
+            for x in range(len(virtual_ips)):
+                if hosts[x] == host:
+                    states.append('MASTER')
+                    priorities.append(100)
+                else:
+                    states.append('BACKUP')
+                    priorities.append(90)
+
+        # remove the host the daemon is being deployed on from the hosts list
+        # used for other_ips in the conf file, and convert the remaining hosts to IPs
+        if host in hosts:
+            hosts.remove(host)
+        host_ips: List[str] = []
+        other_ips: List[List[str]] = []
+        interfaces: List[str] = []
+        for vip in virtual_ips:
+            interface, ip = _get_valid_interface_and_ip(vip, host)
+            host_ips.append(ip)
+            interfaces.append(interface)
+            ips: List[str] = []
+            for h in hosts:
+                _, ip = _get_valid_interface_and_ip(vip, h)
+                ips.append(ip)
+            other_ips.append(ips)
+
+        # Use interface as vrrp_interface for vrrp traffic if vrrp_interface_network not set on the spec
+        vrrp_interfaces: List[str] = []
+        if not spec.vrrp_interface_network:
+            vrrp_interfaces = interfaces
+        else:
+            for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
+                if subnet == spec.vrrp_interface_network:
+                    vrrp_interfaces = [list(ifaces.keys())[0]] * len(interfaces)
+                    logger.info(
+                        f'vrrp will be configured on {host} interface '
+                        f'{vrrp_interfaces} (which is in subnet {subnet})'
+                    )
+                    break
+            else:
+                raise OrchestratorError(
+                    f"Unable to identify vrrp interface for {spec.vrrp_interface_network} on {host}"
+                )
+
+        keepalived_conf = self.mgr.template.render(
+            'services/ingress/keepalived.conf.j2',
+            {
+                'spec': spec,
+                'script': script,
+                'password': password,
+                'interfaces': interfaces,
+                'vrrp_interfaces': vrrp_interfaces,
+                'virtual_ips': virtual_ips,
+                'first_virtual_router_id': spec.first_virtual_router_id,
+                'states': states,
+                'priorities': priorities,
+                'other_ips': other_ips,
+                'host_ips': host_ips,
+            }
+        )
+
+        config_file = {
+            'files': {
+                "keepalived.conf": keepalived_conf,
+            }
+        }
+
+        return config_file, deps
 diff --git a/src/pybind/mgr/cephadm/services/iscsi.py b/src/pybind/mgr/cephadm/services/iscsi.py
new file mode 100644
index 000000000..61b157b44
--- /dev/null
+++ b/src/pybind/mgr/cephadm/services/iscsi.py
@@ -0,0 +1,212 @@
+import errno
+import json
+import logging
+import subprocess
+from typing import List, cast, Optional
+from ipaddress import ip_address, IPv6Address
+
+from mgr_module import HandleCommandResult
+from ceph.deployment.service_spec import IscsiServiceSpec
+
+from orchestrator import DaemonDescription, DaemonDescriptionStatus
+from .cephadmservice import CephadmDaemonDeploySpec, CephService
+from ..
import utils + +logger = logging.getLogger(__name__) + + +class IscsiService(CephService): + TYPE = 'iscsi' + + def config(self, spec: IscsiServiceSpec) -> None: # type: ignore + assert self.TYPE == spec.service_type + assert spec.pool + self.mgr._check_pool_exists(spec.pool, spec.service_name()) + + def get_trusted_ips(self, spec: IscsiServiceSpec) -> str: + # add active mgr ip address to trusted list so dashboard can access + trusted_ip_list = spec.trusted_ip_list if spec.trusted_ip_list else '' + mgr_ip = self.mgr.get_mgr_ip() + if mgr_ip not in [s.strip() for s in trusted_ip_list.split(',')]: + if trusted_ip_list: + trusted_ip_list += ',' + trusted_ip_list += mgr_ip + return trusted_ip_list + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + + spec = cast(IscsiServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + igw_id = daemon_spec.daemon_id + + keyring = self.get_keyring_with_caps(self.get_auth_entity(igw_id), + ['mon', 'profile rbd, ' + 'allow command "osd blocklist", ' + 'allow command "config-key get" with "key" prefix "iscsi/"', + 'mgr', 'allow command "service status"', + 'osd', 'allow rwx']) + + if spec.ssl_cert: + if isinstance(spec.ssl_cert, list): + cert_data = '\n'.join(spec.ssl_cert) + else: + cert_data = spec.ssl_cert + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config-key set', + 'key': f'iscsi/{utils.name_to_config_section("iscsi")}.{igw_id}/iscsi-gateway.crt', + 'val': cert_data, + }) + + if spec.ssl_key: + if isinstance(spec.ssl_key, list): + key_data = '\n'.join(spec.ssl_key) + else: + key_data = spec.ssl_key + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config-key set', + 'key': f'iscsi/{utils.name_to_config_section("iscsi")}.{igw_id}/iscsi-gateway.key', + 'val': key_data, + }) + + trusted_ip_list = self.get_trusted_ips(spec) + + context = { + 'client_name': '{}.{}'.format(utils.name_to_config_section('iscsi'), igw_id), + 'trusted_ip_list': trusted_ip_list, + 'spec': spec + } + igw_conf = self.mgr.template.render('services/iscsi/iscsi-gateway.cfg.j2', context) + + daemon_spec.keyring = keyring + daemon_spec.extra_files = {'iscsi-gateway.cfg': igw_conf} + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + daemon_spec.deps = [trusted_ip_list] + return daemon_spec + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + def get_set_cmd_dicts(out: str) -> List[dict]: + gateways = json.loads(out)['gateways'] + cmd_dicts = [] + # TODO: fail, if we don't have a spec + spec = cast(IscsiServiceSpec, + self.mgr.spec_store.all_specs.get(daemon_descrs[0].service_name(), None)) + if spec.api_secure and spec.ssl_cert and spec.ssl_key: + cmd_dicts.append({ + 'prefix': 'dashboard set-iscsi-api-ssl-verification', + 'value': "false" + }) + else: + cmd_dicts.append({ + 'prefix': 'dashboard set-iscsi-api-ssl-verification', + 'value': "true" + }) + for dd in daemon_descrs: + assert dd.hostname is not None + # todo: this can fail: + spec = cast(IscsiServiceSpec, + self.mgr.spec_store.all_specs.get(dd.service_name(), None)) + if not spec: + logger.warning('No ServiceSpec found for %s', dd) + continue + ip = utils.resolve_ip(self.mgr.inventory.get_addr(dd.hostname)) + # IPv6 URL encoding requires square brackets enclosing the ip + if type(ip_address(ip)) is IPv6Address: + ip = f'[{ip}]' + protocol = "http" + if spec.api_secure and spec.ssl_cert and spec.ssl_key: + protocol = "https" + service_url = 
'{}://{}:{}@{}:{}'.format(
+                    protocol, spec.api_user or 'admin', spec.api_password or 'admin', ip, spec.api_port or '5000')
+                gw = gateways.get(dd.hostname)
+                if not gw or gw['service_url'] != service_url:
+                    safe_service_url = '{}://{}:{}@{}:{}'.format(
+                        protocol, '', '', ip, spec.api_port or '5000')
+                    logger.info('Adding iSCSI gateway %s to Dashboard', safe_service_url)
+                    cmd_dicts.append({
+                        'prefix': 'dashboard iscsi-gateway-add',
+                        'inbuf': service_url,
+                        'name': dd.hostname
+                    })
+            return cmd_dicts
+
+        self._check_and_set_dashboard(
+            service_name='iSCSI',
+            get_cmd='dashboard iscsi-gateway-list',
+            get_set_cmd_dicts=get_set_cmd_dicts
+        )
+
+    def ok_to_stop(self,
+                   daemon_ids: List[str],
+                   force: bool = False,
+                   known: Optional[List[str]] = None) -> HandleCommandResult:
+        # if there is only 1 iscsi daemon, alert the user (--force does not override this)
+        warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Iscsi', 1, True)
+        if warn:
+            return HandleCommandResult(-errno.EBUSY, '', warn_message)
+
+        # if reached here, there is > 1 iscsi daemon. make sure none are down
+        warn_message = (
+            'ALERT: 1 iscsi daemon is already down. Please bring it back up before stopping this one')
+        iscsi_daemons = self.mgr.cache.get_daemons_by_type(self.TYPE)
+        for i in iscsi_daemons:
+            if i.status != DaemonDescriptionStatus.running:
+                return HandleCommandResult(-errno.EBUSY, '', warn_message)
+
+        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
+        warn_message = f'It is presumed safe to stop {names}'
+        return HandleCommandResult(0, warn_message, '')
+
+    def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
+        """
+        Called after the daemon is removed.
+        """
+        logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')
+
+        # remove config for dashboard iscsi gateways
+        ret, out, err = self.mgr.mon_command({
+            'prefix': 'dashboard iscsi-gateway-rm',
+            'name': daemon.hostname,
+        })
+        if not ret:
+            logger.info(f'{daemon.hostname} removed from iscsi gateways dashboard config')
+
+        # needed to know if we have ssl stuff for iscsi in ceph config
+        iscsi_config_dict = {}
+        ret, iscsi_config, err = self.mgr.mon_command({
+            'prefix': 'config-key dump',
+            'key': 'iscsi',
+        })
+        if iscsi_config:
+            iscsi_config_dict = json.loads(iscsi_config)
+
+        # remove iscsi cert and key from ceph config
+        for iscsi_key, value in iscsi_config_dict.items():
+            if f'iscsi/client.{daemon.name()}/' in iscsi_key:
+                ret, out, err = self.mgr.mon_command({
+                    'prefix': 'config-key rm',
+                    'key': iscsi_key,
+                })
+                logger.info(f'{iscsi_key} removed from ceph config')
+
+    def purge(self, service_name: str) -> None:
+        """Removes configuration
+        """
+        spec = cast(IscsiServiceSpec, self.mgr.spec_store[service_name].spec)
+        try:
+            # remove service configuration from the pool
+            try:
+                subprocess.run(['rados',
+                                '-k', str(self.mgr.get_ceph_option('keyring')),
+                                '-n', f'mgr.{self.mgr.get_mgr_id()}',
+                                '-p', cast(str, spec.pool),
+                                'rm',
+                                'gateway.conf'],
+                               timeout=5, check=True)
+                logger.info(f'gateway.conf removed from {spec.pool}')
+            except subprocess.CalledProcessError as ex:
+                logger.error(f'Error executing <<{ex.cmd}>>: {ex.output}')
+            except subprocess.TimeoutExpired:
+                logger.error(f'timeout (5s) trying to remove gateway.conf from {spec.pool}')
+
+        except Exception:
+            logger.exception(f'failed to purge {service_name}')
 diff --git a/src/pybind/mgr/cephadm/services/jaeger.py b/src/pybind/mgr/cephadm/services/jaeger.py
new file mode 100644
index 000000000..c136d20e6
--- /dev/null
+++ b/src/pybind/mgr/cephadm/services/jaeger.py
@@ -0,0 +1,73 @@
+from typing import List, cast +from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec +from ceph.deployment.service_spec import TracingSpec +from mgr_util import build_url + + +class ElasticSearchService(CephadmService): + TYPE = 'elasticsearch' + DEFAULT_SERVICE_PORT = 9200 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + return daemon_spec + + +class JaegerAgentService(CephadmService): + TYPE = 'jaeger-agent' + DEFAULT_SERVICE_PORT = 6799 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + collectors = [] + for dd in self.mgr.cache.get_daemons_by_type(JaegerCollectorService.TYPE): + # scrape jaeger-collector nodes + assert dd.hostname is not None + port = dd.ports[0] if dd.ports else JaegerCollectorService.DEFAULT_SERVICE_PORT + url = build_url(host=dd.hostname, port=port).lstrip('/') + collectors.append(url) + daemon_spec.final_config = {'collector_nodes': ",".join(collectors)} + return daemon_spec + + +class JaegerCollectorService(CephadmService): + TYPE = 'jaeger-collector' + DEFAULT_SERVICE_PORT = 14250 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + elasticsearch_nodes = get_elasticsearch_nodes(self, daemon_spec) + daemon_spec.final_config = {'elasticsearch_nodes': ",".join(elasticsearch_nodes)} + return daemon_spec + + +class JaegerQueryService(CephadmService): + TYPE = 'jaeger-query' + DEFAULT_SERVICE_PORT = 16686 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + elasticsearch_nodes = get_elasticsearch_nodes(self, daemon_spec) + daemon_spec.final_config = {'elasticsearch_nodes': ",".join(elasticsearch_nodes)} + return daemon_spec + + +def get_elasticsearch_nodes(service: CephadmService, daemon_spec: CephadmDaemonDeploySpec) -> List[str]: + elasticsearch_nodes = [] + for dd in service.mgr.cache.get_daemons_by_type(ElasticSearchService.TYPE): + assert dd.hostname is not None + addr = dd.ip if dd.ip else service.mgr.inventory.get_addr(dd.hostname) + port = dd.ports[0] if dd.ports else ElasticSearchService.DEFAULT_SERVICE_PORT + url = build_url(host=addr, port=port).lstrip('/') + elasticsearch_nodes.append(f'http://{url}') + + if len(elasticsearch_nodes) == 0: + # takes elasticsearch address from TracingSpec + spec: TracingSpec = cast( + TracingSpec, service.mgr.spec_store.active_specs[daemon_spec.service_name]) + assert spec.es_nodes is not None + urls = spec.es_nodes.split(",") + for url in urls: + elasticsearch_nodes.append(f'http://{url}') + + return elasticsearch_nodes diff --git a/src/pybind/mgr/cephadm/services/monitoring.py b/src/pybind/mgr/cephadm/services/monitoring.py new file mode 100644 index 000000000..114c84860 --- /dev/null +++ b/src/pybind/mgr/cephadm/services/monitoring.py @@ -0,0 +1,688 @@ +import errno +import ipaddress +import logging +import os +import socket +from typing import List, Any, Tuple, Dict, Optional, cast +from urllib.parse import urlparse + +from mgr_module import HandleCommandResult + +from orchestrator import DaemonDescription +from ceph.deployment.service_spec import AlertManagerSpec, GrafanaSpec, ServiceSpec, \ + SNMPGatewaySpec, PrometheusSpec +from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec +from mgr_util import 
verify_tls, ServerConfigException, create_self_signed_cert, build_url, get_cert_issuer_info, password_hash +from ceph.deployment.utils import wrap_ipv6 + +logger = logging.getLogger(__name__) + + +class GrafanaService(CephadmService): + TYPE = 'grafana' + DEFAULT_SERVICE_PORT = 3000 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials() + deps = [] # type: List[str] + if self.mgr.secure_monitoring_stack and prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') + deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}') + + prom_services = [] # type: List[str] + for dd in self.mgr.cache.get_daemons_by_service('prometheus'): + assert dd.hostname is not None + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + port = dd.ports[0] if dd.ports else 9095 + protocol = 'https' if self.mgr.secure_monitoring_stack else 'http' + prom_services.append(build_url(scheme=protocol, host=addr, port=port)) + + deps.append(dd.name()) + + daemons = self.mgr.cache.get_daemons_by_service('loki') + loki_host = '' + for i, dd in enumerate(daemons): + assert dd.hostname is not None + if i == 0: + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + loki_host = build_url(scheme='http', host=addr, port=3100) + + deps.append(dd.name()) + + root_cert = self.mgr.http_server.service_discovery.ssl_certs.get_root_cert() + oneline_root_cert = '\\n'.join([line.strip() for line in root_cert.splitlines()]) + grafana_data_sources = self.mgr.template.render('services/grafana/ceph-dashboard.yml.j2', + {'hosts': prom_services, + 'prometheus_user': prometheus_user, + 'prometheus_password': prometheus_password, + 'cephadm_root_ca': oneline_root_cert, + 'security_enabled': self.mgr.secure_monitoring_stack, + 'loki_host': loki_host}) + + spec: GrafanaSpec = cast( + GrafanaSpec, self.mgr.spec_store.active_specs[daemon_spec.service_name]) + grafana_ini = self.mgr.template.render( + 'services/grafana/grafana.ini.j2', { + 'anonymous_access': spec.anonymous_access, + 'initial_admin_password': spec.initial_admin_password, + 'http_port': daemon_spec.ports[0] if daemon_spec.ports else self.DEFAULT_SERVICE_PORT, + 'protocol': spec.protocol, + 'http_addr': daemon_spec.ip if daemon_spec.ip else '' + }) + + if 'dashboard' in self.mgr.get('mgr_map')['modules'] and spec.initial_admin_password: + self.mgr.check_mon_command( + {'prefix': 'dashboard set-grafana-api-password'}, inbuf=spec.initial_admin_password) + + cert, pkey = self.prepare_certificates(daemon_spec) + config_file = { + 'files': { + "grafana.ini": grafana_ini, + 'provisioning/datasources/ceph-dashboard.yml': grafana_data_sources, + 'certs/cert_file': '# generated by cephadm\n%s' % cert, + 'certs/cert_key': '# generated by cephadm\n%s' % pkey, + } + } + return config_file, sorted(deps) + + def prepare_certificates(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]: + cert_path = f'{daemon_spec.host}/grafana_crt' + key_path = f'{daemon_spec.host}/grafana_key' + cert = self.mgr.get_store(cert_path) + pkey = self.mgr.get_store(key_path) + certs_present = (cert and pkey) + 
is_valid_certificate = False + (org, cn) = (None, None) + if certs_present: + try: + (org, cn) = get_cert_issuer_info(cert) + verify_tls(cert, pkey) + is_valid_certificate = True + except ServerConfigException as e: + logger.warning(f'Provided grafana TLS certificates are invalid: {e}') + + if is_valid_certificate: + # let's clear health error just in case it was set + self.mgr.remove_health_warning('CEPHADM_CERT_ERROR') + return cert, pkey + + # certificate is not valid, to avoid overwriting user generated + # certificates we only re-generate in case of self signed certificates + # that were originally generated by cephadm or in case cert/key are empty. + if not certs_present or (org == 'Ceph' and cn == 'cephadm'): + logger.info('Regenerating cephadm self-signed grafana TLS certificates') + host_fqdn = socket.getfqdn(daemon_spec.host) + cert, pkey = create_self_signed_cert('Ceph', host_fqdn) + self.mgr.set_store(cert_path, cert) + self.mgr.set_store(key_path, pkey) + if 'dashboard' in self.mgr.get('mgr_map')['modules']: + self.mgr.check_mon_command({ + 'prefix': 'dashboard set-grafana-api-ssl-verify', + 'value': 'false', + }) + self.mgr.remove_health_warning('CEPHADM_CERT_ERROR') # clear if any + else: + # the certificate was not generated by cephadm, we cannot overwrite + # it by new self-signed ones. Let's warn the user to fix the issue + err_msg = """ + Detected invalid grafana certificates. Set mgr/cephadm/grafana_crt + and mgr/cephadm/grafana_key to valid certificates or reset their value + to an empty string in case you want cephadm to generate self-signed Grafana + certificates. + + Once done, run the following command to reconfig the daemon: + + > ceph orch daemon reconfig + + """ + self.mgr.set_health_warning( + 'CEPHADM_CERT_ERROR', 'Invalid grafana certificate: ', 1, [err_msg]) + + return cert, pkey + + def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: + # Use the least-created one as the active daemon + if daemon_descrs: + return daemon_descrs[-1] + # if empty list provided, return empty Daemon Desc + return DaemonDescription() + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + # TODO: signed cert + dd = self.get_active_daemon(daemon_descrs) + assert dd.hostname is not None + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT + spec = cast(GrafanaSpec, self.mgr.spec_store[dd.service_name()].spec) + service_url = build_url(scheme=spec.protocol, host=addr, port=port) + self._set_service_url_on_dashboard( + 'Grafana', + 'dashboard get-grafana-api-url', + 'dashboard set-grafana-api-url', + service_url + ) + + def pre_remove(self, daemon: DaemonDescription) -> None: + """ + Called before grafana daemon is removed. 
+        """
+        if daemon.hostname is not None:
+            # delete cert/key entries for this grafana daemon
+            cert_path = f'{daemon.hostname}/grafana_crt'
+            key_path = f'{daemon.hostname}/grafana_key'
+            self.mgr.set_store(cert_path, None)
+            self.mgr.set_store(key_path, None)
+
+    def ok_to_stop(self,
+                   daemon_ids: List[str],
+                   force: bool = False,
+                   known: Optional[List[str]] = None) -> HandleCommandResult:
+        warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Grafana', 1)
+        if warn and not force:
+            return HandleCommandResult(-errno.EBUSY, '', warn_message)
+        return HandleCommandResult(0, warn_message, '')
+
+
+class AlertmanagerService(CephadmService):
+    TYPE = 'alertmanager'
+    DEFAULT_SERVICE_PORT = 9093
+    USER_CFG_KEY = 'alertmanager/web_user'
+    PASS_CFG_KEY = 'alertmanager/web_password'
+
+    def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
+        assert self.TYPE == daemon_spec.daemon_type
+        daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+        return daemon_spec
+
+    def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
+        assert self.TYPE == daemon_spec.daemon_type
+        deps: List[str] = []
+        default_webhook_urls: List[str] = []
+
+        spec = cast(AlertManagerSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
+        try:
+            secure = spec.secure
+        except AttributeError:
+            secure = False
+        user_data = spec.user_data
+        if 'default_webhook_urls' in user_data and isinstance(
+                user_data['default_webhook_urls'], list):
+            default_webhook_urls.extend(user_data['default_webhook_urls'])
+
+        # dashboard(s)
+        dashboard_urls: List[str] = []
+        snmp_gateway_urls: List[str] = []
+        mgr_map = self.mgr.get('mgr_map')
+        port = None
+        proto = None  # http: or https:
+        url = mgr_map.get('services', {}).get('dashboard', None)
+        if url:
+            p_result = urlparse(url.rstrip('/'))
+            hostname = socket.getfqdn(p_result.hostname)
+
+            try:
+                ip = ipaddress.ip_address(hostname)
+            except ValueError:
+                pass
+            else:
+                if ip.version == 6:
+                    hostname = f'[{hostname}]'
+
+            dashboard_urls.append(
+                f'{p_result.scheme}://{hostname}:{p_result.port}{p_result.path}')
+            proto = p_result.scheme
+            port = p_result.port
+
+        # scan all mgrs to generate deps and to get standbys too.
+        # assume that they are all on the same port as the active mgr.
+        for dd in self.mgr.cache.get_daemons_by_service('mgr'):
+            # we consider mgr a dep even if the dashboard is disabled
+            # in order to be consistent with _calc_daemon_deps().
+ deps.append(dd.name()) + if not port: + continue + if dd.daemon_id == self.mgr.get_mgr_id(): + continue + assert dd.hostname is not None + addr = self._inventory_get_fqdn(dd.hostname) + dashboard_urls.append(build_url(scheme=proto, host=addr, port=port).rstrip('/')) + + for dd in self.mgr.cache.get_daemons_by_service('snmp-gateway'): + assert dd.hostname is not None + assert dd.ports + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + deps.append(dd.name()) + + snmp_gateway_urls.append(build_url(scheme='http', host=addr, + port=dd.ports[0], path='/alerts')) + + context = { + 'secure_monitoring_stack': self.mgr.secure_monitoring_stack, + 'dashboard_urls': dashboard_urls, + 'default_webhook_urls': default_webhook_urls, + 'snmp_gateway_urls': snmp_gateway_urls, + 'secure': secure, + } + yml = self.mgr.template.render('services/alertmanager/alertmanager.yml.j2', context) + + peers = [] + port = 9094 + for dd in self.mgr.cache.get_daemons_by_service('alertmanager'): + assert dd.hostname is not None + deps.append(dd.name()) + addr = self._inventory_get_fqdn(dd.hostname) + peers.append(build_url(host=addr, port=port).lstrip('/')) + + deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}') + + if self.mgr.secure_monitoring_stack: + alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials() + if alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') + node_ip = self.mgr.inventory.get_addr(daemon_spec.host) + host_fqdn = self._inventory_get_fqdn(daemon_spec.host) + cert, key = self.mgr.http_server.service_discovery.ssl_certs.generate_cert( + host_fqdn, node_ip) + context = { + 'alertmanager_web_user': alertmanager_user, + 'alertmanager_web_password': password_hash(alertmanager_password), + } + return { + "files": { + "alertmanager.yml": yml, + 'alertmanager.crt': cert, + 'alertmanager.key': key, + 'web.yml': self.mgr.template.render('services/alertmanager/web.yml.j2', context), + 'root_cert.pem': self.mgr.http_server.service_discovery.ssl_certs.get_root_cert() + }, + 'peers': peers, + 'web_config': '/etc/alertmanager/web.yml' + }, sorted(deps) + else: + return { + "files": { + "alertmanager.yml": yml + }, + "peers": peers + }, sorted(deps) + + def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: + # TODO: if there are multiple daemons, who is the active one? 
+ if daemon_descrs: + return daemon_descrs[0] + # if empty list provided, return empty Daemon Desc + return DaemonDescription() + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + dd = self.get_active_daemon(daemon_descrs) + assert dd.hostname is not None + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT + protocol = 'https' if self.mgr.secure_monitoring_stack else 'http' + service_url = build_url(scheme=protocol, host=addr, port=port) + self._set_service_url_on_dashboard( + 'AlertManager', + 'dashboard get-alertmanager-api-host', + 'dashboard set-alertmanager-api-host', + service_url + ) + + def ok_to_stop(self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None) -> HandleCommandResult: + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Alertmanager', 1) + if warn and not force: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + return HandleCommandResult(0, warn_message, '') + + +class PrometheusService(CephadmService): + TYPE = 'prometheus' + DEFAULT_SERVICE_PORT = 9095 + DEFAULT_MGR_PROMETHEUS_PORT = 9283 + USER_CFG_KEY = 'prometheus/web_user' + PASS_CFG_KEY = 'prometheus/web_password' + + def config(self, spec: ServiceSpec) -> None: + # make sure module is enabled + mgr_map = self.mgr.get('mgr_map') + if 'prometheus' not in mgr_map.get('services', {}): + self.mgr.check_mon_command({ + 'prefix': 'mgr module enable', + 'module': 'prometheus' + }) + # we shouldn't get here (mon will tell the mgr to respawn), but no + # harm done if we do. + + def prepare_create( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config( + self, + daemon_spec: CephadmDaemonDeploySpec, + ) -> Tuple[Dict[str, Any], List[str]]: + + assert self.TYPE == daemon_spec.daemon_type + spec = cast(PrometheusSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + + try: + retention_time = spec.retention_time if spec.retention_time else '15d' + except AttributeError: + retention_time = '15d' + + try: + retention_size = spec.retention_size if spec.retention_size else '0' + except AttributeError: + # default to disabled + retention_size = '0' + + # build service discovery end-point + port = self.mgr.service_discovery_port + mgr_addr = wrap_ipv6(self.mgr.get_mgr_ip()) + protocol = 'https' if self.mgr.secure_monitoring_stack else 'http' + srv_end_point = f'{protocol}://{mgr_addr}:{port}/sd/prometheus/sd-config?' 
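For context, each `?service=...` URL derived from `srv_end_point` below is consumed by Prometheus' standard `http_sd_configs` mechanism, which expects the endpoint to return a JSON array of objects of the form `{"targets": [...], "labels": {...}}`. A small sketch of how such URLs compose; the address and port are illustrative values, not taken from the patch:

```python
from urllib.parse import urlencode


def sd_url(mgr_addr: str, port: int, service: str, secure: bool = False) -> str:
    # mirrors the f-string composition above:
    # <scheme>://<mgr>:<port>/sd/prometheus/sd-config?service=<name>
    scheme = 'https' if secure else 'http'
    return f'{scheme}://{mgr_addr}:{port}/sd/prometheus/sd-config?' + urlencode({'service': service})


assert (sd_url('10.0.0.1', 8765, 'node-exporter')
        == 'http://10.0.0.1:8765/sd/prometheus/sd-config?service=node-exporter')
```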
+
+        node_exporter_cnt = len(self.mgr.cache.get_daemons_by_service('node-exporter'))
+        alertmgr_cnt = len(self.mgr.cache.get_daemons_by_service('alertmanager'))
+        haproxy_cnt = len(self.mgr.cache.get_daemons_by_type('ingress'))
+        node_exporter_sd_url = f'{srv_end_point}service=node-exporter' if node_exporter_cnt > 0 else None
+        alertmanager_sd_url = f'{srv_end_point}service=alertmanager' if alertmgr_cnt > 0 else None
+        haproxy_sd_url = f'{srv_end_point}service=haproxy' if haproxy_cnt > 0 else None
+        mgr_prometheus_sd_url = f'{srv_end_point}service=mgr-prometheus'  # always included
+        ceph_exporter_sd_url = f'{srv_end_point}service=ceph-exporter'  # always included
+
+        alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
+        prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
+
+        # generate the prometheus configuration
+        context = {
+            'alertmanager_web_user': alertmanager_user,
+            'alertmanager_web_password': alertmanager_password,
+            'secure_monitoring_stack': self.mgr.secure_monitoring_stack,
+            'service_discovery_username': self.mgr.http_server.service_discovery.username,
+            'service_discovery_password': self.mgr.http_server.service_discovery.password,
+            'mgr_prometheus_sd_url': mgr_prometheus_sd_url,
+            'node_exporter_sd_url': node_exporter_sd_url,
+            'alertmanager_sd_url': alertmanager_sd_url,
+            'haproxy_sd_url': haproxy_sd_url,
+            'ceph_exporter_sd_url': ceph_exporter_sd_url
+        }
+
+        web_context = {
+            'prometheus_web_user': prometheus_user,
+            'prometheus_web_password': password_hash(prometheus_password),
+        }
+
+        if self.mgr.secure_monitoring_stack:
+            cfg_key = 'mgr/prometheus/root/cert'
+            cmd = {'prefix': 'config-key get', 'key': cfg_key}
+            ret, mgr_prometheus_rootca, err = self.mgr.mon_command(cmd)
+            if ret != 0:
+                logger.error(f'mon command to get config-key {cfg_key} failed: {err}')
+            else:
+                node_ip = self.mgr.inventory.get_addr(daemon_spec.host)
+                host_fqdn = self._inventory_get_fqdn(daemon_spec.host)
+                cert, key = self.mgr.http_server.service_discovery.ssl_certs.generate_cert(host_fqdn, node_ip)
+                r: Dict[str, Any] = {
+                    'files': {
+                        'prometheus.yml': self.mgr.template.render('services/prometheus/prometheus.yml.j2', context),
+                        'root_cert.pem': self.mgr.http_server.service_discovery.ssl_certs.get_root_cert(),
+                        'mgr_prometheus_cert.pem': mgr_prometheus_rootca,
+                        'web.yml': self.mgr.template.render('services/prometheus/web.yml.j2', web_context),
+                        'prometheus.crt': cert,
+                        'prometheus.key': key,
+                    },
+                    'retention_time': retention_time,
+                    'retention_size': retention_size,
+                    'web_config': '/etc/prometheus/web.yml'
+                }
+        else:
+            r = {
+                'files': {
+                    'prometheus.yml': self.mgr.template.render('services/prometheus/prometheus.yml.j2', context)
+                },
+                'retention_time': retention_time,
+                'retention_size': retention_size
+            }
+
+        # include alerts, if present in the container
+        if os.path.exists(self.mgr.prometheus_alerts_path):
+            with open(self.mgr.prometheus_alerts_path, 'r', encoding='utf-8') as f:
+                alerts = f.read()
+            r['files']['/etc/prometheus/alerting/ceph_alerts.yml'] = alerts
+
+        # Include custom alerts if present in the key value store. This enables
+        # users to add custom alerts. Write the file in any case, so that if the
+        # content of the key value store changed, that file is overwritten
+        # (emptied in case the value has been removed from the key value
+        # store). This prevents the necessity to adapt the `cephadm` binary to
+        # remove the file.
+        #
+        # Don't use the template engine for it as
+        #
+        # 1. the alerts are always static and
+        # 2. they are a template themselves for the Go template engine, which
+        #    uses curly braces, so escaping would be cumbersome and unnecessary
+        #    for the user.
+        #
+        r['files']['/etc/prometheus/alerting/custom_alerts.yml'] = \
+            self.mgr.get_store('services/prometheus/alerting/custom_alerts.yml', '')
+
+        return r, sorted(self.calculate_deps())
+
+    def calculate_deps(self) -> List[str]:
+        deps = []  # type: List[str]
+        port = cast(int, self.mgr.get_module_option_ex('prometheus', 'server_port', self.DEFAULT_MGR_PROMETHEUS_PORT))
+        deps.append(str(port))
+        deps.append(str(self.mgr.service_discovery_port))
+        # add an explicit dependency on the active manager. This will force a
+        # re-deploy of prometheus if the mgr has changed (e.g. due to a fail-over).
+        deps.append(self.mgr.get_active_mgr().name())
+        if self.mgr.secure_monitoring_stack:
+            alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
+            prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
+            if prometheus_user and prometheus_password:
+                deps.append(f'{hash(prometheus_user + prometheus_password)}')
+            if alertmanager_user and alertmanager_password:
+                deps.append(f'{hash(alertmanager_user + alertmanager_password)}')
+        deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}')
+        # add dependency on ceph-exporter daemons
+        deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('ceph-exporter')]
+        deps += [s for s in ['node-exporter', 'alertmanager'] if self.mgr.cache.get_daemons_by_service(s)]
+        if len(self.mgr.cache.get_daemons_by_type('ingress')) > 0:
+            deps.append('ingress')
+        return deps
+
+    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
+        # TODO: if there are multiple daemons, who is the active one?
+ if daemon_descrs: + return daemon_descrs[0] + # if empty list provided, return empty Daemon Desc + return DaemonDescription() + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + dd = self.get_active_daemon(daemon_descrs) + assert dd.hostname is not None + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT + protocol = 'https' if self.mgr.secure_monitoring_stack else 'http' + service_url = build_url(scheme=protocol, host=addr, port=port) + self._set_service_url_on_dashboard( + 'Prometheus', + 'dashboard get-prometheus-api-host', + 'dashboard set-prometheus-api-host', + service_url + ) + + def ok_to_stop(self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None) -> HandleCommandResult: + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Prometheus', 1) + if warn and not force: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + return HandleCommandResult(0, warn_message, '') + + +class NodeExporterService(CephadmService): + TYPE = 'node-exporter' + DEFAULT_SERVICE_PORT = 9100 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + deps = [f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}'] + if self.mgr.secure_monitoring_stack: + node_ip = self.mgr.inventory.get_addr(daemon_spec.host) + host_fqdn = self._inventory_get_fqdn(daemon_spec.host) + cert, key = self.mgr.http_server.service_discovery.ssl_certs.generate_cert( + host_fqdn, node_ip) + r = { + 'files': { + 'web.yml': self.mgr.template.render('services/node-exporter/web.yml.j2', {}), + 'root_cert.pem': self.mgr.http_server.service_discovery.ssl_certs.get_root_cert(), + 'node_exporter.crt': cert, + 'node_exporter.key': key, + }, + 'web_config': '/etc/node-exporter/web.yml' + } + else: + r = {} + + return r, deps + + def ok_to_stop(self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None) -> HandleCommandResult: + # since node exporter runs on each host and cannot compromise data, no extra checks required + names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids] + out = f'It is presumed safe to stop {names}' + return HandleCommandResult(0, out, '') + + +class LokiService(CephadmService): + TYPE = 'loki' + DEFAULT_SERVICE_PORT = 3100 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + deps: List[str] = [] + + yml = self.mgr.template.render('services/loki.yml.j2') + return { + "files": { + "loki.yml": yml + } + }, sorted(deps) + + +class PromtailService(CephadmService): + TYPE = 'promtail' + DEFAULT_SERVICE_PORT = 9080 + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return 
daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + deps: List[str] = [] + + daemons = self.mgr.cache.get_daemons_by_service('loki') + loki_host = '' + for i, dd in enumerate(daemons): + assert dd.hostname is not None + if i == 0: + loki_host = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + + deps.append(dd.name()) + + context = { + 'client_hostname': loki_host, + } + + yml = self.mgr.template.render('services/promtail.yml.j2', context) + return { + "files": { + "promtail.yml": yml + } + }, sorted(deps) + + +class SNMPGatewayService(CephadmService): + TYPE = 'snmp-gateway' + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + deps: List[str] = [] + + spec = cast(SNMPGatewaySpec, self.mgr.spec_store[daemon_spec.service_name].spec) + config = { + "destination": spec.snmp_destination, + "snmp_version": spec.snmp_version, + } + if spec.snmp_version == 'V2c': + community = spec.credentials.get('snmp_community', None) + assert community is not None + + config.update({ + "snmp_community": community + }) + else: + # SNMP v3 settings can be either authNoPriv or authPriv + auth_protocol = 'SHA' if not spec.auth_protocol else spec.auth_protocol + + auth_username = spec.credentials.get('snmp_v3_auth_username', None) + auth_password = spec.credentials.get('snmp_v3_auth_password', None) + assert auth_username is not None + assert auth_password is not None + assert spec.engine_id is not None + + config.update({ + "snmp_v3_auth_protocol": auth_protocol, + "snmp_v3_auth_username": auth_username, + "snmp_v3_auth_password": auth_password, + "snmp_v3_engine_id": spec.engine_id, + }) + # authPriv adds encryption + if spec.privacy_protocol: + priv_password = spec.credentials.get('snmp_v3_priv_password', None) + assert priv_password is not None + + config.update({ + "snmp_v3_priv_protocol": spec.privacy_protocol, + "snmp_v3_priv_password": priv_password, + }) + + logger.debug( + f"Generated configuration for '{self.TYPE}' service. 
Dependencies={deps}") + + return config, sorted(deps) diff --git a/src/pybind/mgr/cephadm/services/nfs.py b/src/pybind/mgr/cephadm/services/nfs.py new file mode 100644 index 000000000..f94a00f5b --- /dev/null +++ b/src/pybind/mgr/cephadm/services/nfs.py @@ -0,0 +1,331 @@ +import errno +import ipaddress +import logging +import os +import subprocess +import tempfile +from typing import Dict, Tuple, Any, List, cast, Optional + +from mgr_module import HandleCommandResult +from mgr_module import NFS_POOL_NAME as POOL_NAME + +from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec + +from orchestrator import DaemonDescription + +from cephadm.services.cephadmservice import AuthEntity, CephadmDaemonDeploySpec, CephService + +logger = logging.getLogger(__name__) + + +class NFSService(CephService): + TYPE = 'nfs' + + def ranked(self) -> bool: + return True + + def fence(self, daemon_id: str) -> None: + logger.info(f'Fencing old nfs.{daemon_id}') + ret, out, err = self.mgr.mon_command({ + 'prefix': 'auth rm', + 'entity': f'client.nfs.{daemon_id}', + }) + + # TODO: block/fence this entity (in case it is still running somewhere) + + def fence_old_ranks(self, + spec: ServiceSpec, + rank_map: Dict[int, Dict[int, Optional[str]]], + num_ranks: int) -> None: + for rank, m in list(rank_map.items()): + if rank >= num_ranks: + for daemon_id in m.values(): + if daemon_id is not None: + self.fence(daemon_id) + del rank_map[rank] + nodeid = f'{spec.service_name()}.{rank}' + self.mgr.log.info(f'Removing {nodeid} from the ganesha grace table') + self.run_grace_tool(cast(NFSServiceSpec, spec), 'remove', nodeid) + self.mgr.spec_store.save_rank_map(spec.service_name(), rank_map) + else: + max_gen = max(m.keys()) + for gen, daemon_id in list(m.items()): + if gen < max_gen: + if daemon_id is not None: + self.fence(daemon_id) + del rank_map[rank][gen] + self.mgr.spec_store.save_rank_map(spec.service_name(), rank_map) + + def config(self, spec: NFSServiceSpec) -> None: # type: ignore + from nfs.cluster import create_ganesha_pool + + assert self.TYPE == spec.service_type + create_ganesha_pool(self.mgr) + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + return daemon_spec + + def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: + assert self.TYPE == daemon_spec.daemon_type + + daemon_type = daemon_spec.daemon_type + daemon_id = daemon_spec.daemon_id + host = daemon_spec.host + spec = cast(NFSServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + + deps: List[str] = [] + + nodeid = f'{daemon_spec.service_name}.{daemon_spec.rank}' + + # create the RADOS recovery pool keyring + rados_user = f'{daemon_type}.{daemon_id}' + rados_keyring = self.create_keyring(daemon_spec) + + # ensure rank is known to ganesha + self.mgr.log.info(f'Ensuring {nodeid} is in the ganesha grace table') + self.run_grace_tool(spec, 'add', nodeid) + + # create the rados config object + self.create_rados_config_obj(spec) + + # create the RGW keyring + rgw_user = f'{rados_user}-rgw' + rgw_keyring = self.create_rgw_keyring(daemon_spec) + if spec.virtual_ip: + bind_addr = spec.virtual_ip + else: + bind_addr = daemon_spec.ip if daemon_spec.ip else '' + if not bind_addr: + logger.warning(f'Bind address in {daemon_type}.{daemon_id}\'s ganesha conf is defaulting to empty') + else: + logger.debug("using haproxy bind 
address: %r", bind_addr) + + # generate the ganesha config + def get_ganesha_conf() -> str: + context: Dict[str, Any] = { + "user": rados_user, + "nodeid": nodeid, + "pool": POOL_NAME, + "namespace": spec.service_id, + "rgw_user": rgw_user, + "url": f'rados://{POOL_NAME}/{spec.service_id}/{spec.rados_config_name()}', + # fall back to default NFS port if not present in daemon_spec + "port": daemon_spec.ports[0] if daemon_spec.ports else 2049, + "bind_addr": bind_addr, + "haproxy_hosts": [], + } + if spec.enable_haproxy_protocol: + context["haproxy_hosts"] = self._haproxy_hosts() + logger.debug("selected haproxy_hosts: %r", context["haproxy_hosts"]) + return self.mgr.template.render('services/nfs/ganesha.conf.j2', context) + + # generate the cephadm config json + def get_cephadm_config() -> Dict[str, Any]: + config: Dict[str, Any] = {} + config['pool'] = POOL_NAME + config['namespace'] = spec.service_id + config['userid'] = rados_user + config['extra_args'] = ['-N', 'NIV_EVENT'] + config['files'] = { + 'ganesha.conf': get_ganesha_conf(), + } + config.update( + self.get_config_and_keyring( + daemon_type, daemon_id, + keyring=rados_keyring, + host=host + ) + ) + config['rgw'] = { + 'cluster': 'ceph', + 'user': rgw_user, + 'keyring': rgw_keyring, + } + logger.debug('Generated cephadm config-json: %s' % config) + return config + + return get_cephadm_config(), deps + + def create_rados_config_obj(self, + spec: NFSServiceSpec, + clobber: bool = False) -> None: + objname = spec.rados_config_name() + cmd = [ + 'rados', + '-n', f"mgr.{self.mgr.get_mgr_id()}", + '-k', str(self.mgr.get_ceph_option('keyring')), + '-p', POOL_NAME, + '--namespace', cast(str, spec.service_id), + ] + result = subprocess.run( + cmd + ['get', objname, '-'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + timeout=10) + if not result.returncode and not clobber: + logger.info('Rados config object exists: %s' % objname) + else: + logger.info('Creating rados config object: %s' % objname) + result = subprocess.run( + cmd + ['put', objname, '-'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + timeout=10) + if result.returncode: + self.mgr.log.warning( + f'Unable to create rados config object {objname}: {result.stderr.decode("utf-8")}' + ) + raise RuntimeError(result.stderr.decode("utf-8")) + + def create_keyring(self, daemon_spec: CephadmDaemonDeploySpec) -> str: + daemon_id = daemon_spec.daemon_id + spec = cast(NFSServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + entity: AuthEntity = self.get_auth_entity(daemon_id) + + osd_caps = 'allow rw pool=%s namespace=%s' % (POOL_NAME, spec.service_id) + + logger.info('Creating key for %s' % entity) + keyring = self.get_keyring_with_caps(entity, + ['mon', 'allow r', + 'osd', osd_caps]) + + return keyring + + def create_rgw_keyring(self, daemon_spec: CephadmDaemonDeploySpec) -> str: + daemon_id = daemon_spec.daemon_id + entity: AuthEntity = self.get_auth_entity(f'{daemon_id}-rgw') + + logger.info('Creating key for %s' % entity) + keyring = self.get_keyring_with_caps(entity, + ['mon', 'allow r', + 'osd', 'allow rwx tag rgw *=*']) + + return keyring + + def run_grace_tool(self, + spec: NFSServiceSpec, + action: str, + nodeid: str) -> None: + # write a temp keyring and referencing config file. this is a kludge + # because the ganesha-grace-tool can only authenticate as a client (and + # not a mgr). Also, it doesn't allow you to pass a keyring location via + # the command line, nor does it parse the CEPH_ARGS env var. 
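The workaround described in the comment above generalizes to any CLI tool that can only authenticate as a Ceph client via a conf file. A rough sketch of the pattern, assuming the caller already holds a minimal conf string and a keyring blob, and that the target tool accepts a `--cephconf` flag the way `ganesha-rados-grace` does below:

```python
import os
import subprocess
import tempfile
from typing import List


def run_with_temp_identity(minimal_conf: str, keyring: str,
                           cmd: List[str]) -> 'subprocess.CompletedProcess':
    # write the keyring with tight permissions, point a throwaway conf file
    # at it, then run the external tool against that conf
    with tempfile.NamedTemporaryFile(mode='w') as kr, \
            tempfile.NamedTemporaryFile(mode='w') as conf:
        os.fchmod(kr.fileno(), 0o600)
        kr.write(keyring)
        kr.flush()
        conf.write(minimal_conf)
        conf.write(f'\tkeyring = {kr.name}\n')
        conf.flush()
        return subprocess.run(cmd + ['--cephconf', conf.name],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              timeout=10)
```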
+        tmp_id = f'mgr.nfs.grace.{spec.service_name()}'
+        entity = AuthEntity(f'client.{tmp_id}')
+        keyring = self.get_keyring_with_caps(
+            entity,
+            ['mon', 'allow r', 'osd', f'allow rwx pool {POOL_NAME}']
+        )
+        tmp_keyring = tempfile.NamedTemporaryFile(mode='w', prefix='mgr-grace-keyring')
+        os.fchmod(tmp_keyring.fileno(), 0o600)
+        tmp_keyring.write(keyring)
+        tmp_keyring.flush()
+        tmp_conf = tempfile.NamedTemporaryFile(mode='w', prefix='mgr-grace-conf')
+        tmp_conf.write(self.mgr.get_minimal_ceph_conf())
+        tmp_conf.write(f'\tkeyring = {tmp_keyring.name}\n')
+        tmp_conf.flush()
+        try:
+            cmd: List[str] = [
+                'ganesha-rados-grace',
+                '--cephconf', tmp_conf.name,
+                '--userid', tmp_id,
+                '--pool', POOL_NAME,
+                '--ns', cast(str, spec.service_id),
+                action, nodeid,
+            ]
+            self.mgr.log.debug(cmd)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                                    timeout=10)
+            if result.returncode:
+                self.mgr.log.warning(
+                    f'ganesha-rados-grace tool failed: {result.stderr.decode("utf-8")}'
+                )
+                raise RuntimeError(f'grace tool failed: {result.stderr.decode("utf-8")}')
+
+        finally:
+            self.mgr.check_mon_command({
+                'prefix': 'auth rm',
+                'entity': entity,
+            })
+
+    def remove_rgw_keyring(self, daemon: DaemonDescription) -> None:
+        assert daemon.daemon_id is not None
+        daemon_id: str = daemon.daemon_id
+        entity: AuthEntity = self.get_auth_entity(f'{daemon_id}-rgw')
+
+        logger.info(f'Removing key for {entity}')
+        self.mgr.check_mon_command({
+            'prefix': 'auth rm',
+            'entity': entity,
+        })
+
+    def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
+        super().post_remove(daemon, is_failed_deploy=is_failed_deploy)
+        self.remove_rgw_keyring(daemon)
+
+    def ok_to_stop(self,
+                   daemon_ids: List[str],
+                   force: bool = False,
+                   known: Optional[List[str]] = None) -> HandleCommandResult:
+        # if there is only 1 nfs daemon, alert the user (--force does not override this)
+        warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'NFS', 1, True)
+        if warn:
+            return HandleCommandResult(-errno.EBUSY, '', warn_message)
+
+        # if reached here, there is > 1 nfs daemon.
+        if force:
+            return HandleCommandResult(0, warn_message, '')
+
+        # if reached here, > 1 nfs daemon and no force flag.
+        # Provide warning
+        warn_message = "WARNING: Removing NFS daemons can cause clients to lose connectivity. "
+        return HandleCommandResult(-errno.EBUSY, '', warn_message)
+
+    def purge(self, service_name: str) -> None:
+        if service_name not in self.mgr.spec_store:
+            return
+        spec = cast(NFSServiceSpec, self.mgr.spec_store[service_name].spec)
+
+        logger.info(f'Removing grace file for {service_name}')
+        cmd = [
+            'rados',
+            '-n', f"mgr.{self.mgr.get_mgr_id()}",
+            '-k', str(self.mgr.get_ceph_option('keyring')),
+            '-p', POOL_NAME,
+            '--namespace', cast(str, spec.service_id),
+            'rm', 'grace',
+        ]
+        subprocess.run(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            timeout=10
+        )
+
+    def _haproxy_hosts(self) -> List[str]:
+        # NB: Ideally, we would limit the list to IPs on hosts running
+        # haproxy/ingress only, but due to the nature of cephadm today
+        # we'd "only know the set of haproxy hosts after they've been
+        # deployed" (quoth @adk7398). As it is today we limit the list
+        # to hosts we know are managed by cephadm. That ought to be
+        # good enough to prevent accepting haproxy protocol messages
+        # from "rogue" systems that are not under our control. At
+        # least until we learn otherwise.
+ cluster_ips: List[str] = [] + for host in self.mgr.inventory.keys(): + default_addr = self.mgr.inventory.get_addr(host) + cluster_ips.append(default_addr) + nets = self.mgr.cache.networks.get(host) + if not nets: + continue + for subnet, iface in nets.items(): + ip_subnet = ipaddress.ip_network(subnet) + if ipaddress.ip_address(default_addr) in ip_subnet: + continue # already present + if ip_subnet.is_loopback or ip_subnet.is_link_local: + continue # ignore special subnets + addrs: List[str] = sum((addr_list for addr_list in iface.values()), []) + if addrs: + # one address per interface/subnet is enough + cluster_ips.append(addrs[0]) + return cluster_ips diff --git a/src/pybind/mgr/cephadm/services/nvmeof.py b/src/pybind/mgr/cephadm/services/nvmeof.py new file mode 100644 index 000000000..7d2dd16cf --- /dev/null +++ b/src/pybind/mgr/cephadm/services/nvmeof.py @@ -0,0 +1,93 @@ +import errno +import logging +import json +from typing import List, cast, Optional + +from mgr_module import HandleCommandResult +from ceph.deployment.service_spec import NvmeofServiceSpec + +from orchestrator import DaemonDescription, DaemonDescriptionStatus +from .cephadmservice import CephadmDaemonDeploySpec, CephService +from .. import utils + +logger = logging.getLogger(__name__) + + +class NvmeofService(CephService): + TYPE = 'nvmeof' + + def config(self, spec: NvmeofServiceSpec) -> None: # type: ignore + assert self.TYPE == spec.service_type + assert spec.pool + self.mgr._check_pool_exists(spec.pool, spec.service_name()) + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + + spec = cast(NvmeofServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + nvmeof_gw_id = daemon_spec.daemon_id + host_ip = self.mgr.inventory.get_addr(daemon_spec.host) + + keyring = self.get_keyring_with_caps(self.get_auth_entity(nvmeof_gw_id), + ['mon', 'profile rbd', + 'osd', 'allow all tag rbd *=*']) + + # TODO: check if we can force jinja2 to generate dicts with double quotes instead of using json.dumps + transport_tcp_options = json.dumps(spec.transport_tcp_options) if spec.transport_tcp_options else None + name = '{}.{}'.format(utils.name_to_config_section('nvmeof'), nvmeof_gw_id) + rados_id = name[len('client.'):] if name.startswith('client.') else name + context = { + 'spec': spec, + 'name': name, + 'addr': host_ip, + 'port': spec.port, + 'log_level': 'WARN', + 'rpc_socket': '/var/tmp/spdk.sock', + 'transport_tcp_options': transport_tcp_options, + 'rados_id': rados_id + } + gw_conf = self.mgr.template.render('services/nvmeof/ceph-nvmeof.conf.j2', context) + + daemon_spec.keyring = keyring + daemon_spec.extra_files = {'ceph-nvmeof.conf': gw_conf} + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + daemon_spec.deps = [] + return daemon_spec + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + # TODO: what integration do we need with the dashboard? + pass + + def ok_to_stop(self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None) -> HandleCommandResult: + # if only 1 nvmeof, alert user (this is not passable with --force) + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Nvmeof', 1, True) + if warn: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + # if reached here, there is > 1 nvmeof daemon. make sure none are down + warn_message = ('ALERT: 1 nvmeof daemon is already down. 
+        warn_message = ('ALERT: 1 nvmeof daemon is already down. Please bring it back up before stopping this one')
+        nvmeof_daemons = self.mgr.cache.get_daemons_by_type(self.TYPE)
+        for i in nvmeof_daemons:
+            if i.status != DaemonDescriptionStatus.running:
+                return HandleCommandResult(-errno.EBUSY, '', warn_message)
+
+        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
+        warn_message = f'It is presumed safe to stop {names}'
+        return HandleCommandResult(0, warn_message, '')
+
+    def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
+        """
+        Called after the daemon is removed.
+        """
+        logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')
+        # TODO: remove config for dashboard nvmeof gateways if any
+        # and any certificates being used for mTLS
+
+    def purge(self, service_name: str) -> None:
+        """Removes configuration
+        """
+        # TODO: what should we purge in this case (if any)?
+        pass
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
new file mode 100644
index 000000000..bfecc5723
--- /dev/null
+++ b/src/pybind/mgr/cephadm/services/osd.py
@@ -0,0 +1,972 @@
+import json
+import logging
+from asyncio import gather
+from threading import Lock
+from typing import List, Dict, Any, Set, Tuple, cast, Optional, TYPE_CHECKING
+
+from ceph.deployment import translate
+from ceph.deployment.drive_group import DriveGroupSpec
+from ceph.deployment.drive_selection import DriveSelection
+from ceph.deployment.inventory import Device
+from ceph.utils import datetime_to_str, str_to_datetime
+
+from datetime import datetime
+import orchestrator
+from cephadm.serve import CephadmServe
+from cephadm.utils import SpecialHostLabels
+from ceph.utils import datetime_now
+from orchestrator import OrchestratorError, DaemonDescription
+from mgr_module import MonCommandFailed
+
+from cephadm.services.cephadmservice import CephadmDaemonDeploySpec, CephService
+
+if TYPE_CHECKING:
+    from cephadm.module import CephadmOrchestrator
+
+logger = logging.getLogger(__name__)
+
+
+class OSDService(CephService):
+    TYPE = 'osd'
+
+    def create_from_spec(self, drive_group: DriveGroupSpec) -> str:
+        logger.debug(f"Processing DriveGroup {drive_group}")
+        osd_id_claims = OsdIdClaims(self.mgr)
+        if osd_id_claims.get():
+            logger.info(
+                f"Found osd claims for drivegroup {drive_group.service_id} -> {osd_id_claims.get()}")
+
+        async def create_from_spec_one(host: str, drive_selection: DriveSelection) -> Optional[str]:
+            # skip this host if there has been no change in inventory
+            if not self.mgr.cache.osdspec_needs_apply(host, drive_group):
+                self.mgr.log.debug("skipping apply of %s on %s (no change)" % (
+                    host, drive_group))
+                return None
+            # skip this host if we cannot schedule here
+            if self.mgr.inventory.has_label(host, SpecialHostLabels.DRAIN_DAEMONS):
+                return None
+
+            osd_id_claims_for_host = osd_id_claims.filtered_by_host(host)
+
+            cmds: List[str] = self.driveselection_to_ceph_volume(drive_selection,
+                                                                 osd_id_claims_for_host)
+            if not cmds:
+                logger.debug("No data_devices, skipping DriveGroup: {}".format(
+                    drive_group.service_id))
+                return None
+
+            logger.debug('Applying service osd.%s on host %s...' % (
+                drive_group.service_id, host
+            ))
+            start_ts = datetime_now()
+            env_vars: List[str] = [f"CEPH_VOLUME_OSDSPEC_AFFINITY={drive_group.service_id}"]
+            ret_msg = await self.create_single_host(
+                drive_group, host, cmds,
+                replace_osd_ids=osd_id_claims_for_host, env_vars=env_vars
+            )
+            self.mgr.cache.update_osdspec_last_applied(
+                host, drive_group.service_name(), start_ts
+            )
+            self.mgr.cache.save_host(host)
+            return ret_msg
+
+        async def all_hosts() -> List[Optional[str]]:
+            futures = [create_from_spec_one(h, ds)
+                       for h, ds in self.prepare_drivegroup(drive_group)]
+            return await gather(*futures)
+
+        with self.mgr.async_timeout_handler('cephadm deploy (osd daemon)'):
+            ret = self.mgr.wait_async(all_hosts())
+        return ", ".join(filter(None, ret))
+
+    async def create_single_host(self,
+                                 drive_group: DriveGroupSpec,
+                                 host: str, cmds: List[str], replace_osd_ids: List[str],
+                                 env_vars: Optional[List[str]] = None) -> str:
+        for cmd in cmds:
+            out, err, code = await self._run_ceph_volume_command(host, cmd, env_vars=env_vars)
+            if code == 1 and ', it is already prepared' in '\n'.join(err):
+                # HACK: when we create against an existing LV, ceph-volume
+                # returns an error and the above message. To make this
+                # command idempotent, tolerate this "error" and continue.
+                logger.debug('the device was already prepared; continuing')
+                code = 0
+            if code:
+                raise RuntimeError(
+                    'cephadm exited with an error code: %d, stderr:%s' % (
+                        code, '\n'.join(err)))
+        return await self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+                                                               replace_osd_ids)
+
+    async def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+                                                   replace_osd_ids: Optional[List[str]] = None) -> str:
+
+        if replace_osd_ids is None:
+            replace_osd_ids = OsdIdClaims(self.mgr).filtered_by_host(host)
+            assert replace_osd_ids is not None
+
+        # check result: lvm
+        osds_elems: dict = await CephadmServe(self.mgr)._run_cephadm_json(
+            host, 'osd', 'ceph-volume',
+            [
+                '--',
+                'lvm', 'list',
+                '--format', 'json',
+            ])
+        before_osd_uuid_map = self.mgr.get_osd_uuid_map(only_up=True)
+        fsid = self.mgr._cluster_fsid
+        osd_uuid_map = self.mgr.get_osd_uuid_map()
+        created = []
+        for osd_id, osds in osds_elems.items():
+            for osd in osds:
+                if osd['type'] == 'db':
+                    continue
+                if osd['tags']['ceph.cluster_fsid'] != fsid:
+                    logger.debug('mismatched fsid, skipping %s' % osd)
+                    continue
+                if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
+                    # if it exists but is part of the replacement operation, don't skip
+                    continue
+                if self.mgr.cache.has_daemon(f'osd.{osd_id}', host):
+                    # cephadm daemon instance already exists
+                    logger.debug(f'osd id {osd_id} daemon already exists')
+                    continue
+                if osd_id not in osd_uuid_map:
+                    logger.debug('osd id {} does not exist in cluster'.format(osd_id))
+                    continue
+                if osd_uuid_map.get(osd_id) != osd['tags']['ceph.osd_fsid']:
+                    logger.debug('mismatched osd uuid (cluster has %s, osd '
+                                 'has %s)' % (
+                                     osd_uuid_map.get(osd_id),
+                                     osd['tags']['ceph.osd_fsid']))
+                    continue
+
+                created.append(osd_id)
+                daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
+                    service_name=service_name,
+                    daemon_id=str(osd_id),
+                    host=host,
+                    daemon_type='osd',
+                )
+                daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+                await CephadmServe(self.mgr)._create_daemon(
+                    daemon_spec,
+                    osd_uuid_map=osd_uuid_map)
+
+        # check result: raw
+        raw_elems: dict = await CephadmServe(self.mgr)._run_cephadm_json(
+            host, 'osd', 'ceph-volume',
+            [
+                '--',
+                'raw', 'list',
+                '--format', 'json',
+            ])
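+        # Illustrative only (shape inferred from the loop below; exact field
+        # names can vary across ceph-volume releases): raw_elems maps an OSD
+        # fsid to a metadata dict, roughly
+        #   {'<osd fsid>': {'ceph_fsid': '<cluster fsid>', 'osd_id': 0, ...}}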
+        for osd_uuid, osd in raw_elems.items():
+            if osd.get('ceph_fsid') != fsid:
+                continue
+            osd_id = str(osd.get('osd_id', '-1'))
+            if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
+                # if it exists but is part of the replacement operation, don't skip
+                continue
+            if self.mgr.cache.has_daemon(f'osd.{osd_id}', host):
+                # cephadm daemon instance already exists
+                logger.debug(f'osd id {osd_id} daemon already exists')
+                continue
+            if osd_id not in osd_uuid_map:
+                logger.debug('osd id {} does not exist in cluster'.format(osd_id))
+                continue
+            if osd_uuid_map.get(osd_id) != osd_uuid:
+                logger.debug('mismatched osd uuid (cluster has %s, osd '
+                             'has %s)' % (osd_uuid_map.get(osd_id), osd_uuid))
+                continue
+            if osd_id in created:
+                continue
+
+            created.append(osd_id)
+            daemon_spec = CephadmDaemonDeploySpec(
+                service_name=service_name,
+                daemon_id=osd_id,
+                host=host,
+                daemon_type='osd',
+            )
+            daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+            await CephadmServe(self.mgr)._create_daemon(
+                daemon_spec,
+                osd_uuid_map=osd_uuid_map)
+
+        if created:
+            self.mgr.cache.invalidate_host_devices(host)
+            self.mgr.cache.invalidate_autotune(host)
+            return "Created osd(s) %s on host '%s'" % (','.join(created), host)
+        else:
+            return "Created no osd(s) on host %s; already created?" % host
+
+    def prepare_drivegroup(self, drive_group: DriveGroupSpec) -> List[Tuple[str, DriveSelection]]:
+        # 1) use fn_filter to determine matching_hosts
+        matching_hosts = drive_group.placement.filter_matching_hostspecs(
+            self.mgr.cache.get_schedulable_hosts())
+        # 2) Map the inventory to the InventoryHost object
+        host_ds_map = []
+
+        # set osd_id_claims
+
+        def _find_inv_for_host(hostname: str, inventory_dict: dict) -> List[Device]:
+            # This is stupid and needs to be loaded with the host
+            for _host, _inventory in inventory_dict.items():
+                if _host == hostname:
+                    return _inventory
+            raise OrchestratorError("No inventory found for host: {}".format(hostname))
+
+        # 3) iterate over matching_host and call DriveSelection
+        logger.debug(f"Checking matching hosts -> {matching_hosts}")
+        for host in matching_hosts:
+            inventory_for_host = _find_inv_for_host(host, self.mgr.cache.devices)
+            logger.debug(f"Found inventory for host {inventory_for_host}")
+
+            # List of Daemons on that host
+            dd_for_spec = self.mgr.cache.get_daemons_by_service(drive_group.service_name())
+            dd_for_spec_and_host = [dd for dd in dd_for_spec if dd.hostname == host]
+
+            drive_selection = DriveSelection(drive_group, inventory_for_host,
+                                             existing_daemons=len(dd_for_spec_and_host))
+            logger.debug(f"Found drive selection {drive_selection}")
+            if drive_group.method and drive_group.method == 'raw':
+                # ceph-volume can currently only handle a 1:1 mapping
+                # of data/db/wal devices for raw mode osds. If db/wal devices
+                # are defined and the number does not match the number of data
+                # devices, we need to bail out
+                if drive_selection.data_devices() and drive_selection.db_devices():
+                    if len(drive_selection.data_devices()) != len(drive_selection.db_devices()):
+                        raise OrchestratorError('Raw mode only supports a 1:1 ratio of data to db devices. Found '
+                                                f'{len(drive_selection.data_devices())} potential data device(s) and '
+                                                f'{len(drive_selection.db_devices())} potential db device(s) on host {host}')
+                if drive_selection.data_devices() and drive_selection.wal_devices():
+                    if len(drive_selection.data_devices()) != len(drive_selection.wal_devices()):
+                        raise OrchestratorError('Raw mode only supports a 1:1 ratio of data to wal devices. Found '
+                                                f'{len(drive_selection.data_devices())} potential data device(s) and '
+                                                f'{len(drive_selection.wal_devices())} potential wal device(s) on host {host}')
+            host_ds_map.append((host, drive_selection))
+        return host_ds_map
+
+    @staticmethod
+    def driveselection_to_ceph_volume(drive_selection: DriveSelection,
+                                      osd_id_claims: Optional[List[str]] = None,
+                                      preview: bool = False) -> List[str]:
+        logger.debug(f"Translating DriveGroup <{drive_selection.spec}> to ceph-volume command")
+        cmds: List[str] = translate.to_ceph_volume(drive_selection,
+                                                   osd_id_claims, preview=preview).run()
+        logger.debug(f"Resulting ceph-volume cmds: {cmds}")
+        return cmds
+
+    def get_previews(self, host: str) -> List[Dict[str, Any]]:
+        # Find OSDSpecs that match host.
+        osdspecs = self.resolve_osdspecs_for_host(host)
+        return self.generate_previews(osdspecs, host)
+
+    def generate_previews(self, osdspecs: List[DriveGroupSpec], for_host: str) -> List[Dict[str, Any]]:
+        """
+        The return should look like this:
+
+        [
+          {'data': {},
+           'osdspec': <osdspec name>,
+           'host': <hostname>,
+           'notes': <notes>
+           },
+
+          {'data': ...,
+           'osdspec': ...,
+           'host': ...,
+           'notes': ...
+           }
+        ]
+
+        Note: One host can have multiple previews based on its assigned OSDSpecs.
+        """
+        self.mgr.log.debug(f"Generating OSDSpec previews for {osdspecs}")
+        ret_all: List[Dict[str, Any]] = []
+        if not osdspecs:
+            return ret_all
+        for osdspec in osdspecs:
+
+            # populate osd_id_claims
+            osd_id_claims = OsdIdClaims(self.mgr)
+
+            # prepare driveselection
+            for host, ds in self.prepare_drivegroup(osdspec):
+                if host != for_host:
+                    continue
+
+                # driveselection for host
+                cmds: List[str] = self.driveselection_to_ceph_volume(ds,
+                                                                     osd_id_claims.filtered_by_host(host),
+                                                                     preview=True)
+                if not cmds:
+                    logger.debug("No data_devices, skipping DriveGroup: {}".format(
+                        osdspec.service_name()))
+                    continue
+
+                # get preview data from ceph-volume
+                for cmd in cmds:
+                    with self.mgr.async_timeout_handler(host, f'cephadm ceph-volume -- {cmd}'):
+                        out, err, code = self.mgr.wait_async(self._run_ceph_volume_command(host, cmd))
+                    if out:
+                        try:
+                            concat_out: Dict[str, Any] = json.loads(' '.join(out))
+                        except ValueError:
+                            logger.exception('Cannot decode JSON: \'%s\'' % ' '.join(out))
+                            concat_out = {}
+                        notes = []
+                        if osdspec.data_devices is not None and osdspec.data_devices.limit and len(concat_out) < osdspec.data_devices.limit:
+                            found = len(concat_out)
+                            limit = osdspec.data_devices.limit
+                            notes.append(
+                                f'NOTE: Did not find enough disks matching filter on host {host} to reach data device limit (Found: {found} | Limit: {limit})')
+                        ret_all.append({'data': concat_out,
+                                        'osdspec': osdspec.service_id,
+                                        'host': host,
+                                        'notes': notes})
+        return ret_all
+
+    def resolve_hosts_for_osdspecs(self,
+                                   specs: Optional[List[DriveGroupSpec]] = None
+                                   ) -> List[str]:
+        osdspecs = []
+        if specs:
+            osdspecs = [cast(DriveGroupSpec, spec) for spec in specs]
+        if not osdspecs:
+            self.mgr.log.debug("No OSDSpecs found")
+            return []
+        return sum([spec.placement.filter_matching_hostspecs(self.mgr.cache.get_schedulable_hosts()) for spec in osdspecs], [])
+
+    def resolve_osdspecs_for_host(self, host: str,
+                                  specs: Optional[List[DriveGroupSpec]] = None) -> List[DriveGroupSpec]:
+        matching_specs = []
+        self.mgr.log.debug(f"Finding OSDSpecs for host: <{host}>")
+        if not specs:
+            specs = [cast(DriveGroupSpec, spec) for (sn, spec) in self.mgr.spec_store.spec_preview.items()
+                     if spec.service_type == 'osd']
+        for spec in specs:
+            if host in spec.placement.filter_matching_hostspecs(self.mgr.cache.get_schedulable_hosts()):
+                self.mgr.log.debug(f"Found OSDSpecs for host: <{host}> -> <{spec}>")
+                matching_specs.append(spec)
+        return matching_specs
+
+    async def _run_ceph_volume_command(self, host: str,
+                                       cmd: str, env_vars: Optional[List[str]] = None
+                                       ) -> Tuple[List[str], List[str], int]:
+        self.mgr.inventory.assert_host(host)
+
+        # get bootstrap key
+        ret, keyring, err = self.mgr.check_mon_command({
+            'prefix': 'auth get',
+            'entity': 'client.bootstrap-osd',
+        })
+
+        j = json.dumps({
+            'config': self.mgr.get_minimal_ceph_conf(),
+            'keyring': keyring,
+        })
+
+        split_cmd = cmd.split(' ')
+        _cmd = ['--config-json', '-', '--']
+        _cmd.extend(split_cmd)
+        out, err, code = await CephadmServe(self.mgr)._run_cephadm(
+            host, 'osd', 'ceph-volume',
+            _cmd,
+            env_vars=env_vars,
+            stdin=j,
+            error_ok=True)
+        return out, err, code
+
+    def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
+        # Do not remove the osd.N keyring, if we failed to deploy the OSD, because
+        # we cannot recover from it. The OSD keys are created by ceph-volume and not by
+        # us.
+        if not is_failed_deploy:
+            super().post_remove(daemon, is_failed_deploy=is_failed_deploy)
+
+
+class OsdIdClaims(object):
+    """
+    Retrieve and provide osd ids that can be reused in the cluster
+    """
+
+    def __init__(self, mgr: "CephadmOrchestrator") -> None:
+        self.mgr: "CephadmOrchestrator" = mgr
+        self.osd_host_map: Dict[str, List[str]] = dict()
+        self.refresh()
+
+    def refresh(self) -> None:
+        try:
+            ret, out, err = self.mgr.check_mon_command({
+                'prefix': 'osd tree',
+                'states': ['destroyed'],
+                'format': 'json'
+            })
+        except MonCommandFailed as e:
+            logger.exception('osd tree failed')
+            raise OrchestratorError(str(e))
+        try:
+            tree = json.loads(out)
+        except ValueError:
+            logger.exception(f'Cannot decode JSON: \'{out}\'')
+            return
+
+        nodes = tree.get('nodes', {})
+        for node in nodes:
+            if node.get('type') == 'host':
+                self.osd_host_map.update(
+                    {node.get('name'): [str(_id) for _id in node.get('children', list())]}
+                )
+        if self.osd_host_map:
+            self.mgr.log.info(f"Found osd claims -> {self.osd_host_map}")
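+
+    # Illustrative only (hypothetical values): after refresh(), osd_host_map
+    # maps host buckets from the CRUSH tree to the destroyed OSD ids under
+    # them, e.g. {'host1': ['0', '3'], 'host2': ['7']}, so those ids can be
+    # reclaimed when new OSDs are created.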
+
+    def get(self) -> Dict[str, List[str]]:
+        return self.osd_host_map
+
+    def filtered_by_host(self, host: str) -> List[str]:
+        """
+        Return the list of osd ids that can be reused in a host
+
+        OSD id claims in the CRUSH map are linked to the bare name of
+        the hostname. In case of FQDN hostnames the host is looked up
+        by its bare name.
+        """
+        return self.osd_host_map.get(host.split(".")[0], [])
+
+
+class RemoveUtil(object):
+    def __init__(self, mgr: "CephadmOrchestrator") -> None:
+        self.mgr: "CephadmOrchestrator" = mgr
+
+    def get_osds_in_cluster(self) -> List[str]:
+        osd_map = self.mgr.get_osdmap()
+        return [str(x.get('osd')) for x in osd_map.dump().get('osds', [])]
+
+    def osd_df(self) -> dict:
+        base_cmd = 'osd df'
+        ret, out, err = self.mgr.mon_command({
+            'prefix': base_cmd,
+            'format': 'json'
+        })
+        try:
+            return json.loads(out)
+        except ValueError:
+            logger.exception(f'Cannot decode JSON: \'{out}\'')
+            return {}
+
+    def get_pg_count(self, osd_id: int, osd_df: Optional[dict] = None) -> int:
+        if not osd_df:
+            osd_df = self.osd_df()
+        osd_nodes = osd_df.get('nodes', [])
+        for osd_node in osd_nodes:
+            if osd_node.get('id') == int(osd_id):
+                return osd_node.get('pgs', -1)
+        return -1
+
+    def find_osd_stop_threshold(self, osds: List["OSD"]) -> Optional[List["OSD"]]:
+        """
+        Cut the osd list in half until it's ok-to-stop
+
+        :param osds: list of OSD objects
+        :return: list of osd_ids that can be stopped at once
+        """
+        if not osds:
+            return []
+        while not self.ok_to_stop(osds):
+            if len(osds) <= 1:
+                # can't even stop one OSD, aborting
+                self.mgr.log.debug(
+                    "Can't even stop one OSD. Cluster is probably busy. Retrying later..")
+                return []
+
+            # This potentially prolongs the global wait time.
+            self.mgr.event.wait(1)
+            # splitting osd_ids in half until ok_to_stop yields success
+            # maybe popping ids off one by one is better here..depends on the cluster size I guess..
+            # There's a lot of room for micro adjustments here
+            osds = osds[len(osds) // 2:]
+        return osds
+
+        # todo start draining
+        # return all([osd.start_draining() for osd in osds])
+
+    def ok_to_stop(self, osds: List["OSD"]) -> bool:
+        cmd_args = {
+            'prefix': "osd ok-to-stop",
+            'ids': [str(osd.osd_id) for osd in osds]
+        }
+        return self._run_mon_cmd(cmd_args, error_ok=True)
+
+    def set_osd_flag(self, osds: List["OSD"], flag: str) -> bool:
+        base_cmd = f"osd {flag}"
+        self.mgr.log.debug(f"running cmd: {base_cmd} on ids {osds}")
+        ret, out, err = self.mgr.mon_command({
+            'prefix': base_cmd,
+            'ids': [str(osd.osd_id) for osd in osds]
+        })
+        if ret != 0:
+            self.mgr.log.error(f"Could not set {flag} flag for {osds}. <{err}>")
+            return False
+        self.mgr.log.info(f"{','.join([str(o) for o in osds])} now {flag}")
+        return True
+
+    def get_weight(self, osd: "OSD") -> Optional[float]:
+        ret, out, err = self.mgr.mon_command({
+            'prefix': 'osd crush tree',
+            'format': 'json',
+        })
+        if ret != 0:
+            self.mgr.log.error(f"Could not dump crush weights. <{err}>")
+            return None
+        j = json.loads(out)
+        for n in j.get("nodes", []):
+            if n.get("name") == f"osd.{osd.osd_id}":
+                self.mgr.log.info(f"{osd} crush weight is {n.get('crush_weight')}")
+                return n.get("crush_weight")
+        return None
+
+    def reweight_osd(self, osd: "OSD", weight: float) -> bool:
+        self.mgr.log.debug(f"running cmd: osd crush reweight on {osd}")
+        ret, out, err = self.mgr.mon_command({
+            'prefix': "osd crush reweight",
+            'name': f"osd.{osd.osd_id}",
+            'weight': weight,
+        })
+        if ret != 0:
+            self.mgr.log.error(f"Could not reweight {osd} to {weight}. <{err}>")
+            return False
<{err}>") + return False + self.mgr.log.info(f"{osd} weight is now {weight}") + return True + + def zap_osd(self, osd: "OSD") -> str: + "Zaps all devices that are associated with an OSD" + if osd.hostname is not None: + cmd = ['--', 'lvm', 'zap', '--osd-id', str(osd.osd_id)] + if not osd.no_destroy: + cmd.append('--destroy') + with self.mgr.async_timeout_handler(osd.hostname, f'cephadm ceph-volume {" ".join(cmd)}'): + out, err, code = self.mgr.wait_async(CephadmServe(self.mgr)._run_cephadm( + osd.hostname, 'osd', 'ceph-volume', + cmd, + error_ok=True)) + self.mgr.cache.invalidate_host_devices(osd.hostname) + if code: + raise OrchestratorError('Zap failed: %s' % '\n'.join(out + err)) + return '\n'.join(out + err) + raise OrchestratorError(f"Failed to zap OSD {osd.osd_id} because host was unknown") + + def safe_to_destroy(self, osd_ids: List[int]) -> bool: + """ Queries the safe-to-destroy flag for OSDs """ + cmd_args = {'prefix': 'osd safe-to-destroy', + 'ids': [str(x) for x in osd_ids]} + return self._run_mon_cmd(cmd_args, error_ok=True) + + def destroy_osd(self, osd_id: int) -> bool: + """ Destroys an OSD (forcefully) """ + cmd_args = {'prefix': 'osd destroy-actual', + 'id': int(osd_id), + 'yes_i_really_mean_it': True} + return self._run_mon_cmd(cmd_args) + + def purge_osd(self, osd_id: int) -> bool: + """ Purges an OSD from the cluster (forcefully) """ + cmd_args = { + 'prefix': 'osd purge-actual', + 'id': int(osd_id), + 'yes_i_really_mean_it': True + } + return self._run_mon_cmd(cmd_args) + + def _run_mon_cmd(self, cmd_args: dict, error_ok: bool = False) -> bool: + """ + Generic command to run mon_command and evaluate/log the results + """ + ret, out, err = self.mgr.mon_command(cmd_args) + if ret != 0: + self.mgr.log.debug(f"ran {cmd_args} with mon_command") + if not error_ok: + self.mgr.log.error( + f"cmd: {cmd_args.get('prefix')} failed with: {err}. (errno:{ret})") + return False + self.mgr.log.debug(f"cmd: {cmd_args.get('prefix')} returns: {out}") + return True + + +class NotFoundError(Exception): + pass + + +class OSD: + + def __init__(self, + osd_id: int, + remove_util: RemoveUtil, + drain_started_at: Optional[datetime] = None, + process_started_at: Optional[datetime] = None, + drain_stopped_at: Optional[datetime] = None, + drain_done_at: Optional[datetime] = None, + draining: bool = False, + started: bool = False, + stopped: bool = False, + replace: bool = False, + force: bool = False, + hostname: Optional[str] = None, + zap: bool = False, + no_destroy: bool = False): + # the ID of the OSD + self.osd_id = osd_id + + # when did process (not the actual draining) start + self.process_started_at = process_started_at + + # when did the drain start + self.drain_started_at = drain_started_at + + # when did the drain stop + self.drain_stopped_at = drain_stopped_at + + # when did the drain finish + self.drain_done_at = drain_done_at + + # did the draining start + self.draining = draining + + # was the operation started + self.started = started + + # was the operation stopped + self.stopped = stopped + + # If this is a replace or remove operation + self.replace = replace + # If we wait for the osd to be drained + self.force = force + # The name of the node + self.hostname = hostname + + # mgr obj to make mgr/mon calls + self.rm_util: RemoveUtil = remove_util + + self.original_weight: Optional[float] = None + + # Whether devices associated with the OSD should be zapped (DATA ERASED) + self.zap = zap + # Whether all associated LV devices should be destroyed. 
+
+    def start(self) -> None:
+        if self.started:
+            logger.debug(f"Already started draining {self}")
+            return None
+        self.started = True
+        self.stopped = False
+
+    def start_draining(self) -> bool:
+        if self.stopped:
+            logger.debug(f"Won't start draining {self}. OSD draining is stopped.")
+            return False
+        if self.replace:
+            self.rm_util.set_osd_flag([self], 'out')
+        else:
+            self.original_weight = self.rm_util.get_weight(self)
+            self.rm_util.reweight_osd(self, 0.0)
+        self.drain_started_at = datetime.utcnow()
+        self.draining = True
+        logger.debug(f"Started draining {self}.")
+        return True
+
+    def stop_draining(self) -> bool:
+        if self.replace:
+            self.rm_util.set_osd_flag([self], 'in')
+        else:
+            if self.original_weight:
+                self.rm_util.reweight_osd(self, self.original_weight)
+        self.drain_stopped_at = datetime.utcnow()
+        self.draining = False
+        logger.debug(f"Stopped draining {self}.")
+        return True
+
+    def stop(self) -> None:
+        if self.stopped:
+            logger.debug(f"Already stopped draining {self}")
+            return None
+        self.started = False
+        self.stopped = True
+        self.stop_draining()
+
+    @property
+    def is_draining(self) -> bool:
+        """
+        Consider an OSD draining when it is
+        actively draining but not yet empty
+        """
+        return self.draining and not self.is_empty
+
+    @property
+    def is_ok_to_stop(self) -> bool:
+        return self.rm_util.ok_to_stop([self])
+
+    @property
+    def is_empty(self) -> bool:
+        if self.get_pg_count() == 0:
+            if not self.drain_done_at:
+                self.drain_done_at = datetime.utcnow()
+                self.draining = False
+            return True
+        return False
+
+    def safe_to_destroy(self) -> bool:
+        return self.rm_util.safe_to_destroy([self.osd_id])
+
+    def down(self) -> bool:
+        return self.rm_util.set_osd_flag([self], 'down')
+
+    def destroy(self) -> bool:
+        return self.rm_util.destroy_osd(self.osd_id)
+
+    def do_zap(self) -> str:
+        return self.rm_util.zap_osd(self)
+
+    def purge(self) -> bool:
+        return self.rm_util.purge_osd(self.osd_id)
+
+    def get_pg_count(self) -> int:
+        return self.rm_util.get_pg_count(self.osd_id)
+
+    @property
+    def exists(self) -> bool:
+        return str(self.osd_id) in self.rm_util.get_osds_in_cluster()
+
+    def drain_status_human(self) -> str:
+        default_status = 'not started'
+        status = 'started' if self.started and not self.draining else default_status
+        status = 'draining' if self.draining else status
+        status = 'done, waiting for purge' if self.drain_done_at and not self.draining else status
+        return status
+
+    def pg_count_str(self) -> str:
+        return 'n/a' if self.get_pg_count() < 0 else str(self.get_pg_count())
+
+    def to_json(self) -> dict:
+        out: Dict[str, Any] = dict()
+        out['osd_id'] = self.osd_id
+        out['started'] = self.started
+        out['draining'] = self.draining
+        out['stopped'] = self.stopped
+        out['replace'] = self.replace
+        out['force'] = self.force
+        out['zap'] = self.zap
+        out['hostname'] = self.hostname  # type: ignore
+
+        for k in ['drain_started_at', 'drain_stopped_at', 'drain_done_at', 'process_started_at']:
+            if getattr(self, k):
+                out[k] = datetime_to_str(getattr(self, k))
+            else:
+                out[k] = getattr(self, k)
+        return out
+
+    @classmethod
+    def from_json(cls, inp: Optional[Dict[str, Any]], rm_util: RemoveUtil) -> Optional["OSD"]:
+        if not inp:
+            return None
+        for date_field in ['drain_started_at', 'drain_stopped_at', 'drain_done_at', 'process_started_at']:
+            if inp.get(date_field):
+                inp.update({date_field: str_to_datetime(inp.get(date_field, ''))})
+        inp.update({'remove_util': rm_util})
+        if 'nodename' in inp:
+            hostname = inp.pop('nodename')
+            inp['hostname'] = hostname
+        return cls(**inp)
+
+    def __hash__(self) -> int:
+        return hash(self.osd_id)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, OSD):
+            return NotImplemented
+        return self.osd_id == other.osd_id
+
+    def __repr__(self) -> str:
+        return f"osd.{self.osd_id}{' (draining)' if self.draining else ''}"
+
+
+class OSDRemovalQueue(object):
+
+    def __init__(self, mgr: "CephadmOrchestrator") -> None:
+        self.mgr: "CephadmOrchestrator" = mgr
+        self.osds: Set[OSD] = set()
+        self.rm_util = RemoveUtil(mgr)
+
+        # locks multithreaded access to self.osds. Please avoid holding
+        # it across network calls, like mon commands.
+        self.lock = Lock()
+
+    def process_removal_queue(self) -> None:
+        """
+        Performs actions in the _serve() loop to remove an OSD
+        when the criteria are met.
+
+        we can't hold self.lock, as we're calling _remove_daemon in the loop
+        """
+
+        # make sure that we don't run on OSDs that are not in the cluster anymore.
+        self.cleanup()
+
+        # find osds that are ok-to-stop and not yet draining
+        ready_to_drain_osds = self._ready_to_drain_osds()
+        if ready_to_drain_osds:
+            # start draining those
+            _ = [osd.start_draining() for osd in ready_to_drain_osds]
+
+        all_osds = self.all_osds()
+
+        logger.debug(
+            f"{self.queue_size()} OSDs are scheduled "
+            f"for removal: {all_osds}")
+
+        # Check all osds for their state and take action (remove, purge etc)
+        new_queue: Set[OSD] = set()
+        for osd in all_osds:  # type: OSD
+            if not osd.force:
+                # skip criteria
+                if not osd.is_empty:
+                    logger.debug(f"{osd} is not empty yet. Waiting a bit more")
+                    new_queue.add(osd)
+                    continue
+
+            if not osd.safe_to_destroy():
+                logger.debug(
+                    f"{osd} is not safe-to-destroy yet. Waiting a bit more")
+                new_queue.add(osd)
+                continue
+
+            # abort criteria
+            if not osd.down():
+                # also remove it from the remove_osd list and set a health_check warning?
+                raise orchestrator.OrchestratorError(
+                    f"Could not mark {osd} down")
+
+            # stop and remove daemon
+            assert osd.hostname is not None
+
+            if self.mgr.cache.has_daemon(f'osd.{osd.osd_id}'):
+                CephadmServe(self.mgr)._remove_daemon(f'osd.{osd.osd_id}', osd.hostname)
+                logger.info(f"Successfully removed {osd} on {osd.hostname}")
+            else:
+                logger.info(f"Daemon {osd} on {osd.hostname} was already removed")
+
+            if osd.replace:
+                # mark destroyed in osdmap
+                if not osd.destroy():
+                    raise orchestrator.OrchestratorError(
+                        f"Could not destroy {osd}")
+                logger.info(
+                    f"Successfully destroyed old {osd} on {osd.hostname}; ready for replacement")
+            else:
+                # purge from osdmap
+                if not osd.purge():
+                    raise orchestrator.OrchestratorError(f"Could not purge {osd}")
+                logger.info(f"Successfully purged {osd} on {osd.hostname}")
+
+            if osd.zap:
+                # throws an exception if the zap fails
+                logger.info(f"Zapping devices for {osd} on {osd.hostname}")
+                osd.do_zap()
+                logger.info(f"Successfully zapped devices for {osd} on {osd.hostname}")
+
+            logger.debug(f"Removing {osd} from the queue.")
+
+        # self could change while this is processing (osds get added from the CLI)
+        # The new set is: 'an intersection of all osds that are still not empty/removed (new_queue) and
+        # osds that were added while this method was executed'
+        with self.lock:
+            self.osds.intersection_update(new_queue)
+            self._save_to_store()
+
+    def cleanup(self) -> None:
+        # OSDs can always be cleaned up manually. This ensures that we only
+        # operate on OSDs that still exist.
+        with self.lock:
+            for osd in self._not_in_cluster():
+                self.osds.remove(osd)
+
+    def _ready_to_drain_osds(self) -> List["OSD"]:
+        """
+        Returns OSDs that are ok to stop and not yet draining. Only returns as many OSDs as can
+        be accommodated by the 'max_osd_draining_count' config value, considering the number of OSDs
+        that are already draining.
+        """
+        draining_limit = max(1, self.mgr.max_osd_draining_count)
+        num_already_draining = len(self.draining_osds())
+        num_to_start_draining = max(0, draining_limit - num_already_draining)
+        stoppable_osds = self.rm_util.find_osd_stop_threshold(self.idling_osds())
+        return [] if stoppable_osds is None else stoppable_osds[:num_to_start_draining]
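+
+    # Worked example (hypothetical numbers): with max_osd_draining_count = 10
+    # and 4 OSDs already draining, at most 6 of the idling, ok-to-stop OSDs
+    # are returned and started draining on this pass of the queue.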
+
+    def _save_to_store(self) -> None:
+        osd_queue = [osd.to_json() for osd in self.osds]
+        logger.debug(f"Saving {osd_queue} to store")
+        self.mgr.set_store('osd_remove_queue', json.dumps(osd_queue))
+
+    def load_from_store(self) -> None:
+        with self.lock:
+            for k, v in self.mgr.get_store_prefix('osd_remove_queue').items():
+                for osd in json.loads(v):
+                    logger.debug(f"Loading osd ->{osd} from store")
+                    osd_obj = OSD.from_json(osd, rm_util=self.rm_util)
+                    if osd_obj is not None:
+                        self.osds.add(osd_obj)
+
+    def as_osd_ids(self) -> List[int]:
+        with self.lock:
+            return [osd.osd_id for osd in self.osds]
+
+    def queue_size(self) -> int:
+        with self.lock:
+            return len(self.osds)
+
+    def draining_osds(self) -> List["OSD"]:
+        with self.lock:
+            return [osd for osd in self.osds if osd.is_draining]
+
+    def idling_osds(self) -> List["OSD"]:
+        with self.lock:
+            return [osd for osd in self.osds if not osd.is_draining and not osd.is_empty]
+
+    def empty_osds(self) -> List["OSD"]:
+        with self.lock:
+            return [osd for osd in self.osds if osd.is_empty]
+
+    def all_osds(self) -> List["OSD"]:
+        with self.lock:
+            return [osd for osd in self.osds]
+
+    def _not_in_cluster(self) -> List["OSD"]:
+        return [osd for osd in self.osds if not osd.exists]
+
+    def enqueue(self, osd: "OSD") -> None:
+        if not osd.exists:
+            raise NotFoundError()
+        with self.lock:
+            self.osds.add(osd)
+        osd.start()
+
+    def rm(self, osd: "OSD") -> None:
+        if not osd.exists:
+            raise NotFoundError()
+        osd.stop()
+        with self.lock:
+            try:
+                logger.debug(f'Removing {osd} from the queue.')
+                self.osds.remove(osd)
+            except KeyError:
+                logger.debug(f"Could not find {osd} in queue.")
+                raise KeyError
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, OSDRemovalQueue):
+            return False
+        with self.lock:
+            return self.osds == other.osds
diff --git a/src/pybind/mgr/cephadm/ssh.py b/src/pybind/mgr/cephadm/ssh.py
new file mode 100644
index 000000000..d17cc0fcc
--- /dev/null
+++ b/src/pybind/mgr/cephadm/ssh.py
@@ -0,0 +1,369 @@
+import logging
+import os
+import asyncio
+from tempfile import NamedTemporaryFile
+from threading import Thread
+from contextlib import contextmanager
+from io import StringIO
+from shlex import quote
+from typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Iterator, TypeVar, Awaitable, Union
+from orchestrator import OrchestratorError
+
+try:
+    import asyncssh
+except ImportError:
+    asyncssh = None  # type: ignore
+
+if TYPE_CHECKING:
+    from cephadm.module import CephadmOrchestrator
+    from asyncssh.connection import SSHClientConnection
+
+T = TypeVar('T')
+
+
+logger = logging.getLogger(__name__)
+
+asyncssh_logger = logging.getLogger('asyncssh')
+asyncssh_logger.propagate = False
+
+
+class HostConnectionError(OrchestratorError):
+    def __init__(self, message: str, hostname: str, addr: str) -> None:
+        super().__init__(message)
+        self.hostname = hostname
+        self.addr = addr
+
+
+DEFAULT_SSH_CONFIG = """
+Host *
+  User root
+  StrictHostKeyChecking no
+  UserKnownHostsFile /dev/null
+  ConnectTimeout=30
+"""
+
+
+class EventLoopThread(Thread):
+
+    def __init__(self) -> None:
+        self._loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self._loop)
+
+        super().__init__(target=self._loop.run_forever)
+        self.start()
+
+    def get_result(self, coro: Awaitable[T], timeout: Optional[int] = None) -> T:
+        # useful to note: This "run_coroutine_threadsafe" returns a
+        # concurrent.futures.Future, rather than an asyncio.Future. They are
+        # fairly similar but have a few differences, notably in our case
+        # that the result function of a concurrent.futures.Future accepts
+        # a timeout argument
+        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
+        try:
+            return future.result(timeout)
+        except asyncio.TimeoutError:
+            # try to cancel the task before raising the exception further up
+            future.cancel()
+            raise
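+
+# Illustrative only (hypothetical names): callers hand a coroutine to the
+# long-lived loop thread and block for its result on the calling thread, e.g.
+#   loop = EventLoopThread()
+#   out = loop.get_result(some_coroutine(), timeout=30)
+# raising asyncio.TimeoutError if the coroutine does not finish in time.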
+
+
+class SSHManager:
+
+    def __init__(self, mgr: "CephadmOrchestrator"):
+        self.mgr: "CephadmOrchestrator" = mgr
+        self.cons: Dict[str, "SSHClientConnection"] = {}
+
+    async def _remote_connection(self,
+                                 host: str,
+                                 addr: Optional[str] = None,
+                                 ) -> "SSHClientConnection":
+        if not self.cons.get(host) or host not in self.mgr.inventory:
+            if not addr and host in self.mgr.inventory:
+                addr = self.mgr.inventory.get_addr(host)
+
+            if not addr:
+                raise OrchestratorError("host address is empty")
+
+            assert self.mgr.ssh_user
+            n = self.mgr.ssh_user + '@' + addr
+            logger.debug("Opening connection to {} with ssh options '{}'".format(
+                n, self.mgr._ssh_options))
+
+            asyncssh.set_log_level('DEBUG')
+            asyncssh.set_debug_level(3)
+
+            with self.redirect_log(host, addr):
+                try:
+                    ssh_options = asyncssh.SSHClientConnectionOptions(
+                        keepalive_interval=7, keepalive_count_max=3)
+                    conn = await asyncssh.connect(addr, username=self.mgr.ssh_user, client_keys=[self.mgr.tkey.name],
+                                                  known_hosts=None, config=[self.mgr.ssh_config_fname],
+                                                  preferred_auth=['publickey'], options=ssh_options)
+                except OSError:
+                    raise
+                except asyncssh.Error:
+                    raise
+                except Exception:
+                    raise
+            self.cons[host] = conn
+
+        self.mgr.offline_hosts_remove(host)
+
+        return self.cons[host]
+
+    @contextmanager
+    def redirect_log(self, host: str, addr: str) -> Iterator[None]:
+        log_string = StringIO()
+        ch = logging.StreamHandler(log_string)
+        ch.setLevel(logging.INFO)
+        asyncssh_logger.addHandler(ch)
+
+        try:
+            yield
+        except OSError as e:
+            self.mgr.offline_hosts.add(host)
+            log_content = log_string.getvalue()
+            msg = f"Can't communicate with remote host `{addr}`, possibly because the host is not reachable or python3 is not installed on the host. {str(e)}"
+            logger.exception(msg)
+            raise HostConnectionError(msg, host, addr)
+        except asyncssh.Error as e:
+            self.mgr.offline_hosts.add(host)
+            log_content = log_string.getvalue()
+            msg = f'Failed to connect to {host} ({addr}). {str(e)}' + '\n' + f'Log: {log_content}'
+            logger.debug(msg)
+            raise HostConnectionError(msg, host, addr)
+        except Exception as e:
+            self.mgr.offline_hosts.add(host)
+            log_content = log_string.getvalue()
+            logger.exception(str(e))
+            raise HostConnectionError(
+                f'Failed to connect to {host} ({addr}): {repr(e)}' + '\n' + f'Log: {log_content}', host, addr)
+        finally:
+            log_string.flush()
+            asyncssh_logger.removeHandler(ch)
+
+    def remote_connection(self,
+                          host: str,
+                          addr: Optional[str] = None,
+                          ) -> "SSHClientConnection":
+        with self.mgr.async_timeout_handler(host, f'ssh {host} (addr {addr})'):
+            return self.mgr.wait_async(self._remote_connection(host, addr))
+
+    async def _execute_command(self,
+                               host: str,
+                               cmd_components: List[str],
+                               stdin: Optional[str] = None,
+                               addr: Optional[str] = None,
+                               log_command: Optional[bool] = True,
+                               ) -> Tuple[str, str, int]:
+
+        conn = await self._remote_connection(host, addr)
+        sudo_prefix = "sudo " if self.mgr.ssh_user != 'root' else ""
+        cmd = sudo_prefix + " ".join(quote(x) for x in cmd_components)
+        try:
+            address = addr or self.mgr.inventory.get_addr(host)
+        except Exception:
+            address = host
+        if log_command:
+            logger.debug(f'Running command: {cmd}')
+        try:
+            r = await conn.run(f'{sudo_prefix}true', check=True, timeout=5)  # host quick check
+            r = await conn.run(cmd, input=stdin)
+        # handle these Exceptions otherwise you might get a weird error like
+        # TypeError: __init__() missing 1 required positional argument: 'reason' (due to the asyncssh error interacting with raise_if_exception)
+        except asyncssh.ChannelOpenError as e:
+            # SSH connection closed or broken, will create new connection next call
+            logger.debug(f'Connection to {host} failed. {str(e)}')
+            await self._reset_con(host)
+            self.mgr.offline_hosts.add(host)
+            raise HostConnectionError(f'Unable to reach remote host {host}. {str(e)}', host, address)
+        except asyncssh.ProcessError as e:
+            msg = f"Cannot execute the command '{cmd}' on the {host}. {str(e.stderr)}."
+            logger.debug(msg)
+            await self._reset_con(host)
+            self.mgr.offline_hosts.add(host)
+            raise HostConnectionError(msg, host, address)
+        except Exception as e:
+            msg = f"Generic error while executing command '{cmd}' on the host {host}. {str(e)}."
+            logger.debug(msg)
+            await self._reset_con(host)
+            self.mgr.offline_hosts.add(host)
+            raise HostConnectionError(msg, host, address)
+
+        def _rstrip(v: Union[bytes, str, None]) -> str:
+            if not v:
+                return ''
+            if isinstance(v, str):
+                return v.rstrip('\n')
+            if isinstance(v, bytes):
+                return v.decode().rstrip('\n')
+            raise OrchestratorError(
+                f'Unable to parse ssh output with type {type(v)} from remote host {host}')
+
+        out = _rstrip(r.stdout)
+        err = _rstrip(r.stderr)
+        rc = r.returncode if r.returncode else 0
+
+        return out, err, rc
+
+    def execute_command(self,
+                        host: str,
+                        cmd: List[str],
+                        stdin: Optional[str] = None,
+                        addr: Optional[str] = None,
+                        log_command: Optional[bool] = True
+                        ) -> Tuple[str, str, int]:
+        with self.mgr.async_timeout_handler(host, " ".join(cmd)):
+            return self.mgr.wait_async(self._execute_command(host, cmd, stdin, addr, log_command))
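+
+    # Illustrative only (hypothetical call site): the synchronous wrapper
+    # returns (stdout, stderr, returncode), e.g.
+    #   out, err, rc = ssh_manager.execute_command(host, ['ls', '/var/lib/ceph'])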
+
+    async def _check_execute_command(self,
+                                     host: str,
+                                     cmd: List[str],
+                                     stdin: Optional[str] = None,
+                                     addr: Optional[str] = None,
+                                     log_command: Optional[bool] = True
+                                     ) -> str:
+        out, err, code = await self._execute_command(host, cmd, stdin, addr, log_command)
+        if code != 0:
+            msg = f'Command {cmd} failed. {err}'
+            logger.debug(msg)
+            raise OrchestratorError(msg)
+        return out
+
+    def check_execute_command(self,
+                              host: str,
+                              cmd: List[str],
+                              stdin: Optional[str] = None,
+                              addr: Optional[str] = None,
+                              log_command: Optional[bool] = True,
+                              ) -> str:
+        with self.mgr.async_timeout_handler(host, " ".join(cmd)):
+            return self.mgr.wait_async(self._check_execute_command(host, cmd, stdin, addr, log_command))
+
+    async def _write_remote_file(self,
+                                 host: str,
+                                 path: str,
+                                 content: bytes,
+                                 mode: Optional[int] = None,
+                                 uid: Optional[int] = None,
+                                 gid: Optional[int] = None,
+                                 addr: Optional[str] = None,
+                                 ) -> None:
+        try:
+            cephadm_tmp_dir = f"/tmp/cephadm-{self.mgr._cluster_fsid}"
+            dirname = os.path.dirname(path)
+            await self._check_execute_command(host, ['mkdir', '-p', dirname], addr=addr)
+            await self._check_execute_command(host, ['mkdir', '-p', cephadm_tmp_dir + dirname], addr=addr)
+            tmp_path = cephadm_tmp_dir + path + '.new'
+            await self._check_execute_command(host, ['touch', tmp_path], addr=addr)
+            if self.mgr.ssh_user != 'root':
+                assert self.mgr.ssh_user
+                await self._check_execute_command(host, ['chown', '-R', self.mgr.ssh_user, cephadm_tmp_dir], addr=addr)
+                await self._check_execute_command(host, ['chmod', str(644), tmp_path], addr=addr)
+            with NamedTemporaryFile(prefix='cephadm-write-remote-file-') as f:
+                os.fchmod(f.fileno(), 0o600)
+                f.write(content)
+                f.flush()
+                conn = await self._remote_connection(host, addr)
+                async with conn.start_sftp_client() as sftp:
+                    await sftp.put(f.name, tmp_path)
+            if uid is not None and gid is not None and mode is not None:
+                # shlex quote takes str or byte object, not int
+                await self._check_execute_command(host, ['chown', '-R', str(uid) + ':' + str(gid), tmp_path], addr=addr)
+                await self._check_execute_command(host, ['chmod', oct(mode)[2:], tmp_path], addr=addr)
+            await self._check_execute_command(host, ['mv', tmp_path, path], addr=addr)
+        except Exception as e:
+            msg = f"Unable to write {host}:{path}: {e}"
+            logger.exception(msg)
+            raise OrchestratorError(msg)
+
+    def write_remote_file(self,
+                          host: str,
+                          path: str,
+                          content: bytes,
+                          mode: Optional[int] = None,
+                          uid: Optional[int] = None,
+                          gid: Optional[int] = None,
+                          addr: Optional[str] = None,
+                          ) -> None:
+        with self.mgr.async_timeout_handler(host, f'writing file {path}'):
+            self.mgr.wait_async(self._write_remote_file(
+                host, path, content, mode, uid, gid, addr))
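+
+    # Design note (derived from _write_remote_file above): content is staged
+    # under /tmp/cephadm-<fsid><path>.new and then renamed into place with
+    # 'mv', so readers of the destination path never observe a partially
+    # written file.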
+
+    async def _reset_con(self, host: str) -> None:
+        conn = self.cons.get(host)
+        if conn:
+            logger.debug(f'_reset_con close {host}')
+            conn.close()
+            del self.cons[host]
+
+    def reset_con(self, host: str) -> None:
+        with self.mgr.async_timeout_handler(cmd=f'resetting ssh connection to {host}'):
+            self.mgr.wait_async(self._reset_con(host))
+
+    def _reset_cons(self) -> None:
+        for host, conn in self.cons.items():
+            logger.debug(f'_reset_cons close {host}')
+            conn.close()
+        self.cons = {}
+
+    def _reconfig_ssh(self) -> None:
+        temp_files = []  # type: list
+        ssh_options = []  # type: List[str]
+
+        # ssh_config
+        self.mgr.ssh_config_fname = self.mgr.ssh_config_file
+        ssh_config = self.mgr.get_store("ssh_config")
+        if ssh_config is not None or self.mgr.ssh_config_fname is None:
+            if not ssh_config:
+                ssh_config = DEFAULT_SSH_CONFIG
+            f = NamedTemporaryFile(prefix='cephadm-conf-')
+            os.fchmod(f.fileno(), 0o600)
+            f.write(ssh_config.encode('utf-8'))
+            f.flush()  # make visible to other processes
+            temp_files += [f]
+            self.mgr.ssh_config_fname = f.name
+        if self.mgr.ssh_config_fname:
+            self.mgr.validate_ssh_config_fname(self.mgr.ssh_config_fname)
+            ssh_options += ['-F', self.mgr.ssh_config_fname]
+        self.mgr.ssh_config = ssh_config
+
+        # identity
+        ssh_key = self.mgr.get_store("ssh_identity_key")
+        ssh_pub = self.mgr.get_store("ssh_identity_pub")
+        ssh_cert = self.mgr.get_store("ssh_identity_cert")
+        self.mgr.ssh_pub = ssh_pub
+        self.mgr.ssh_key = ssh_key
+        self.mgr.ssh_cert = ssh_cert
+        if ssh_key:
+            self.mgr.tkey = NamedTemporaryFile(prefix='cephadm-identity-')
+            self.mgr.tkey.write(ssh_key.encode('utf-8'))
+            os.fchmod(self.mgr.tkey.fileno(), 0o600)
+            self.mgr.tkey.flush()  # make visible to other processes
+            temp_files += [self.mgr.tkey]
+            if ssh_pub:
+                tpub = open(self.mgr.tkey.name + '.pub', 'w')
+                os.fchmod(tpub.fileno(), 0o600)
+                tpub.write(ssh_pub)
+                tpub.flush()  # make visible to other processes
+                temp_files += [tpub]
+            if ssh_cert:
+                tcert = open(self.mgr.tkey.name + '-cert.pub', 'w')
+                os.fchmod(tcert.fileno(), 0o600)
+                tcert.write(ssh_cert)
+                tcert.flush()  # make visible to other processes
+                temp_files += [tcert]
+            ssh_options += ['-i', self.mgr.tkey.name]
+
+        self.mgr._temp_files = temp_files
+        if ssh_options:
+            self.mgr._ssh_options = ' '.join(ssh_options)
+        else:
+            self.mgr._ssh_options = None
+
+        if self.mgr.mode == 'root':
+            self.mgr.ssh_user = self.mgr.get_store('ssh_user', default='root')
+        elif self.mgr.mode == 'cephadm-package':
+            self.mgr.ssh_user = 'cephadm'
+
+        self._reset_cons()
diff --git a/src/pybind/mgr/cephadm/ssl_cert_utils.py b/src/pybind/mgr/cephadm/ssl_cert_utils.py
new file mode 100644
index 000000000..fcc6f00ea
--- /dev/null
+++ b/src/pybind/mgr/cephadm/ssl_cert_utils.py
@@ -0,0 +1,156 @@
+
+from typing import Any, Tuple, IO
+import ipaddress
+import tempfile
+import logging
+
+from datetime import datetime, timedelta
+from cryptography import x509
+from cryptography.x509.oid import NameOID
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.backends import default_backend
+from mgr_util import verify_tls_files
+
+from orchestrator import OrchestratorError
+
+
+logger = logging.getLogger(__name__)
+
+
+class SSLConfigException(Exception):
+    pass
+
+
+class SSLCerts:
+    def __init__(self) -> None:
+        self.root_cert: Any
+        self.root_key: Any
+        self.key_file: IO[bytes]
+        self.cert_file: IO[bytes]
+
+    def generate_root_cert(self, addr: str) -> Tuple[str, str]:
+        self.root_key = rsa.generate_private_key(
+            public_exponent=65537, key_size=4096, backend=default_backend())
+        root_public_key = self.root_key.public_key()
+        root_builder = x509.CertificateBuilder()
+        root_builder = root_builder.subject_name(x509.Name([
+            x509.NameAttribute(NameOID.COMMON_NAME, u'cephadm-root'),
+        ]))
+        root_builder = root_builder.issuer_name(x509.Name([
+            x509.NameAttribute(NameOID.COMMON_NAME, u'cephadm-root'),
+        ]))
+        root_builder = root_builder.not_valid_before(datetime.now())
+        root_builder = root_builder.not_valid_after(datetime.now() + timedelta(days=(365 * 10 + 3)))
+        root_builder = root_builder.serial_number(x509.random_serial_number())
+        root_builder = root_builder.public_key(root_public_key)
+        root_builder = root_builder.add_extension(
+            x509.SubjectAlternativeName(
+                [x509.IPAddress(ipaddress.IPv4Address(addr))]
+            ),
+            critical=False
+        )
+        root_builder = root_builder.add_extension(
+            x509.BasicConstraints(ca=True, path_length=None), critical=True,
+        )
+
+        self.root_cert = root_builder.sign(
+            private_key=self.root_key, algorithm=hashes.SHA256(), backend=default_backend()
+        )
+
+        cert_str = self.root_cert.public_bytes(encoding=serialization.Encoding.PEM).decode('utf-8')
+        key_str = self.root_key.private_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PrivateFormat.TraditionalOpenSSL,
+            encryption_algorithm=serialization.NoEncryption()
+        ).decode('utf-8')
+
+        return (cert_str, key_str)
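+
+    # Illustrative only (hypothetical address): both returned strings are
+    # standard PEM, so e.g.
+    #   certs = SSLCerts()
+    #   cert_pem, key_pem = certs.generate_root_cert('10.0.0.1')
+    #   assert cert_pem.startswith('-----BEGIN CERTIFICATE-----')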
+
+    def generate_cert(self, host: str, addr: str) -> Tuple[str, str]:
+        have_ip = True
+        try:
+            ip = x509.IPAddress(ipaddress.IPv4Address(addr))
+        except Exception:
+            try:
+                ip = x509.IPAddress(ipaddress.IPv6Address(addr))
+            except Exception:
+                have_ip = False
+
+        private_key = rsa.generate_private_key(
+            public_exponent=65537, key_size=4096, backend=default_backend())
+        public_key = private_key.public_key()
+
+        builder = x509.CertificateBuilder()
+        builder = builder.subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, addr), ]))
+        builder = builder.issuer_name(
+            x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'cephadm-root'), ]))
+        builder = builder.not_valid_before(datetime.now())
+        builder = builder.not_valid_after(datetime.now() + timedelta(days=(365 * 10 + 3)))
+        builder = builder.serial_number(x509.random_serial_number())
+        builder = builder.public_key(public_key)
+        if have_ip:
+            builder = builder.add_extension(
+                x509.SubjectAlternativeName(
+                    [ip, x509.DNSName(host)]
+                ),
+                critical=False
+            )
+        else:
+            builder = builder.add_extension(
+                x509.SubjectAlternativeName(
+                    [x509.DNSName(host)]
+                ),
+                critical=False
+            )
+        builder = builder.add_extension(x509.BasicConstraints(
+            ca=False, path_length=None), critical=True,)
+
+        cert = builder.sign(private_key=self.root_key,
+                            algorithm=hashes.SHA256(), backend=default_backend())
+        cert_str = cert.public_bytes(encoding=serialization.Encoding.PEM).decode('utf-8')
+        key_str = private_key.private_bytes(encoding=serialization.Encoding.PEM,
+                                            format=serialization.PrivateFormat.TraditionalOpenSSL,
+                                            encryption_algorithm=serialization.NoEncryption()
+                                            ).decode('utf-8')
+
+        return (cert_str, key_str)
+
+    def generate_cert_files(self, host: str, addr: str) -> Tuple[str, str]:
+        cert, key = self.generate_cert(host, addr)
+
+        self.cert_file = tempfile.NamedTemporaryFile()
+        self.cert_file.write(cert.encode('utf-8'))
+        self.cert_file.flush()  # cert_tmp must not be gc'ed
+
+        self.key_file = tempfile.NamedTemporaryFile()
+        self.key_file.write(key.encode('utf-8'))
+        self.key_file.flush()  # pkey_tmp must not be gc'ed
+
+        verify_tls_files(self.cert_file.name, self.key_file.name)
+        return self.cert_file.name, self.key_file.name
+
+    def get_root_cert(self) -> str:
+        try:
+            return self.root_cert.public_bytes(encoding=serialization.Encoding.PEM).decode('utf-8')
+        except AttributeError:
+            return ''
+
+    def get_root_key(self) -> str:
+        try:
+            return self.root_key.private_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PrivateFormat.TraditionalOpenSSL,
+                encryption_algorithm=serialization.NoEncryption(),
+            ).decode('utf-8')
+        except AttributeError:
+            return ''
+
+    def load_root_credentials(self, cert: str, priv_key: str) -> None:
+        given_cert = x509.load_pem_x509_certificate(cert.encode('utf-8'), backend=default_backend())
+        tz = given_cert.not_valid_after.tzinfo
+        if datetime.now(tz) >= given_cert.not_valid_after:
+            raise OrchestratorError('Given cert is expired')
+        self.root_cert = given_cert
+        self.root_key = serialization.load_pem_private_key(
+            data=priv_key.encode('utf-8'), backend=default_backend(), password=None)
diff --git a/src/pybind/mgr/cephadm/template.py b/src/pybind/mgr/cephadm/template.py
new file mode 100644
index 000000000..0d62e587c
--- /dev/null
+++ b/src/pybind/mgr/cephadm/template.py
@@ -0,0 +1,109 @@
+import copy
+from typing import Optional, TYPE_CHECKING
+
+from jinja2 import Environment, PackageLoader, select_autoescape, StrictUndefined
+from jinja2 import exceptions as j2_exceptions
+
+if TYPE_CHECKING:
+    from cephadm.module import CephadmOrchestrator
+
+
+class TemplateError(Exception):
+    pass
+
+
+class UndefinedError(TemplateError):
+    pass
+
+
+class TemplateNotFoundError(TemplateError):
+    pass
+
+
+class TemplateEngine:
+    def render(self, name: str, context: Optional[dict] = None) -> str:
+        raise NotImplementedError()
+
+
+class Jinja2Engine(TemplateEngine):
+    def __init__(self) -> None:
+        self.env = Environment(
+            loader=PackageLoader('cephadm', 'templates'),
+            autoescape=select_autoescape(['html', 'xml'], default_for_string=False),
+            trim_blocks=True,
+            lstrip_blocks=True,
+            undefined=StrictUndefined
+        )
+
+    def render(self, name: str, context: Optional[dict] = None) -> str:
+        try:
+            template = self.env.get_template(name)
+            if context is None:
+                return template.render()
+            return template.render(context)
+        except j2_exceptions.UndefinedError as e:
+            raise UndefinedError(e.message)
+        except j2_exceptions.TemplateNotFound as e:
+            raise TemplateNotFoundError(e.message)
+
+    def render_plain(self, source: str, context: Optional[dict]) -> str:
+        try:
+            template = self.env.from_string(source)
+            if context is None:
+                return template.render()
+            return template.render(context)
+        except j2_exceptions.UndefinedError as e:
+            raise UndefinedError(e.message)
+        except j2_exceptions.TemplateNotFound as e:
+            raise TemplateNotFoundError(e.message)
+
+
+class TemplateMgr:
+    def __init__(self, mgr: "CephadmOrchestrator"):
+        self.engine = Jinja2Engine()
+        self.base_context = {
+            'cephadm_managed': 'This file is generated by cephadm.'
+        }
+        self.mgr = mgr
+
+    def render(self, name: str,
+               context: Optional[dict] = None,
+               managed_context: bool = True,
+               host: Optional[str] = None) -> str:
+        """Render a string from a template with context.
+
+        :param name: template name. e.g. services/nfs/ganesha.conf.j2
+        :type name: str
+        :param context: a dictionary that contains values to be used in the template, defaults
+            to None
+        :type context: Optional[dict], optional
+        :param managed_context: whether to inject default context like the managed header, defaults
+            to True
+        :type managed_context: bool, optional
+        :param host: The host name used to build the key to access
+            the module's persistent key-value store.
+        :type host: Optional[str], optional
+        :return: the templated string
+        :rtype: str
+        """
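+        # Illustrative call (context keys depend on the template): elsewhere
+        # in this patch the nvmeof service renders its config with
+        #   self.mgr.template.render('services/nvmeof/ceph-nvmeof.conf.j2', context)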
+        ctx = {}
+        if managed_context:
+            ctx = copy.deepcopy(self.base_context)
+        if context is not None:
+            ctx = {**ctx, **context}
+
+        # Check if the given name exists in the module's persistent
+        # key-value store, e.g.
+        # - blink_device_light_cmd
+        # - <host>/blink_device_light_cmd
+        # - services/nfs/ganesha.conf
+        store_name = name.rstrip('.j2')
+        custom_template = self.mgr.get_store(store_name, None)
+        if host and custom_template is None:
+            store_name = '{}/{}'.format(host, store_name)
+            custom_template = self.mgr.get_store(store_name, None)
+
+        if custom_template:
+            return self.engine.render_plain(custom_template, ctx)
+        else:
+            return self.engine.render(name, ctx)
diff --git a/src/pybind/mgr/cephadm/templates/blink_device_light_cmd.j2 b/src/pybind/mgr/cephadm/templates/blink_device_light_cmd.j2
new file mode 100644
index 000000000..dab115833
--- /dev/null
+++ b/src/pybind/mgr/cephadm/templates/blink_device_light_cmd.j2
@@ -0,0 +1 @@
+lsmcli local-disk-{{ ident_fault }}-led-{{'on' if on else 'off'}} --path '{{ path or dev }}'
diff --git a/src/pybind/mgr/cephadm/templates/services/alertmanager/alertmanager.yml.j2 b/src/pybind/mgr/cephadm/templates/services/alertmanager/alertmanager.yml.j2
new file mode 100644
index 000000000..b34a1fc17
--- /dev/null
+++ b/src/pybind/mgr/cephadm/templates/services/alertmanager/alertmanager.yml.j2
@@ -0,0 +1,51 @@
+# {{ cephadm_managed }}
+# See https://prometheus.io/docs/alerting/configuration/ for documentation.
+
+global:
+  resolve_timeout: 5m
+{% if not secure %}
+  http_config:
+    tls_config:
+{% if secure_monitoring_stack %}
+      ca_file: root_cert.pem
+{% else %}
+      insecure_skip_verify: true
+{% endif %}
+{% endif %}
+
+route:
+  receiver: 'default'
+  routes:
+    - group_by: ['alertname']
+      group_wait: 10s
+      group_interval: 10s
+      repeat_interval: 1h
+      receiver: 'ceph-dashboard'
+{% if snmp_gateway_urls %}
+      continue: true
+    - receiver: 'snmp-gateway'
+      repeat_interval: 1h
+      group_interval: 10s
+      group_by: ['alertname']
+      match_re:
+        oid: "(1.3.6.1.4.1.50495.).*"
+{% endif %}
+
+receivers:
+- name: 'default'
+  webhook_configs:
+{% for url in default_webhook_urls %}
+  - url: '{{ url }}'
+{% endfor %}
+- name: 'ceph-dashboard'
+  webhook_configs:
+{% for url in dashboard_urls %}
+  - url: '{{ url }}/api/prometheus_receiver'
+{% endfor %}
+{% if snmp_gateway_urls %}
+- name: 'snmp-gateway'
+  webhook_configs:
+{% for url in snmp_gateway_urls %}
+  - url: '{{ url }}'
+{% endfor %}
+{% endif %}
diff --git a/src/pybind/mgr/cephadm/templates/services/alertmanager/web.yml.j2 b/src/pybind/mgr/cephadm/templates/services/alertmanager/web.yml.j2
new file mode 100644
index 000000000..ef4f0b4c7
--- /dev/null
+++ b/src/pybind/mgr/cephadm/templates/services/alertmanager/web.yml.j2
@@ -0,0 +1,5 @@
+tls_server_config:
+  cert_file: alertmanager.crt
+  key_file: alertmanager.key
+basic_auth_users:
+  {{ alertmanager_web_user }}: {{ alertmanager_web_password }}
diff --git a/src/pybind/mgr/cephadm/templates/services/grafana/ceph-dashboard.yml.j2 b/src/pybind/mgr/cephadm/templates/services/grafana/ceph-dashboard.yml.j2
new file mode 100644
index 000000000..46aea864f
--- /dev/null
+++ b/src/pybind/mgr/cephadm/templates/services/grafana/ceph-dashboard.yml.j2
@@ -0,0 +1,39 @@
+# {{ cephadm_managed }}
+apiVersion: 1
+
+deleteDatasources:
+{% for host in hosts %}
+  - name: 'Dashboard{{ loop.index }}'
+    orgId: 1
+{% endfor %}
+
+datasources:
+{% for host in hosts %}
+  - name: 'Dashboard{{ loop.index }}'
+    type: 'prometheus'
+    access: 'proxy'
+    orgId: 1
+    url: '{{ host }}'
+    basicAuth: {{ 'true' if security_enabled else 'false' }}
+    isDefault: {{ 'true' if loop.first else 'false' }}
+    editable: false
+{% if security_enabled %}
+    basicAuthUser: {{ prometheus_user }}
+    jsonData:
+      graphiteVersion: "1.1"
+      tlsAuth: false
false + tlsAuthWithCACert: true + tlsSkipVerify: false + secureJsonData: + basicAuthPassword: {{ prometheus_password }} + tlsCACert: "{{ cephadm_root_ca }}" +{% endif %} +{% endfor %} + + - name: 'Loki' + type: 'loki' + access: 'proxy' + url: '{{ loki_host }}' + basicAuth: false + isDefault: false + editable: false diff --git a/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2 b/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2 new file mode 100644 index 000000000..e6c7bce15 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2 @@ -0,0 +1,28 @@ +# {{ cephadm_managed }} +[users] + default_theme = light +{% if anonymous_access %} +[auth.anonymous] + enabled = true + org_name = 'Main Org.' + org_role = 'Viewer' +{% endif %} +[server] + domain = 'bootstrap.storage.lab' + protocol = {{ protocol }} + cert_file = /etc/grafana/certs/cert_file + cert_key = /etc/grafana/certs/cert_key + http_port = {{ http_port }} + http_addr = {{ http_addr }} +[snapshots] + external_enabled = false +[security] +{% if not initial_admin_password %} + disable_initial_admin_creation = true +{% else %} + admin_user = admin + admin_password = {{ initial_admin_password }} +{% endif %} + cookie_secure = true + cookie_samesite = none + allow_embedding = true diff --git a/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 b/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 new file mode 100644 index 000000000..100acce40 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 @@ -0,0 +1,90 @@ +# {{ cephadm_managed }} +global + log 127.0.0.1 local2 + chroot /var/lib/haproxy + pidfile /var/lib/haproxy/haproxy.pid + maxconn 8000 + daemon + stats socket /var/lib/haproxy/stats +{% if spec.ssl_cert %} + {% if spec.ssl_dh_param %} + tune.ssl.default-dh-param {{ spec.ssl_dh_param }} + {% endif %} + {% if spec.ssl_ciphers %} + ssl-default-bind-ciphers {{ spec.ssl_ciphers | join(':') }} + {% endif %} + {% if spec.ssl_options %} + ssl-default-bind-options {{ spec.ssl_options | join(' ') }} + {% endif %} +{% endif %} + +defaults + mode {{ mode }} + log global +{% if mode == 'http' %} + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout queue 20s + timeout connect 5s + timeout http-request 1s + timeout http-keep-alive 5s + timeout client 30s + timeout server 30s + timeout check 5s +{% endif %} +{% if mode == 'tcp' %} + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout check 10s +{% endif %} + maxconn 8000 + +frontend stats + mode http + bind {{ ip }}:{{ monitor_port }} + bind {{ local_host_ip }}:{{ monitor_port }} + stats enable + stats uri /stats + stats refresh 10s + stats auth {{ user }}:{{ password }} + http-request use-service prometheus-exporter if { path /metrics } + monitor-uri /health + +frontend frontend +{% if spec.ssl_cert %} + bind {{ ip }}:{{ frontend_port }} ssl crt /var/lib/haproxy/haproxy.pem +{% else %} + bind {{ ip }}:{{ frontend_port }} +{% endif %} + default_backend backend + +backend backend +{% if mode == 'http' %} + option forwardfor +{% if backend_spec.ssl %} + default-server ssl + default-server verify none +{% endif %} + balance static-rr + option httpchk HEAD / HTTP/1.0 + {% for server in servers %} + server {{ server.name }} {{ server.ip }}:{{ server.port }} check weight 100 + {% endfor %} +{% endif %} +{% if mode == 'tcp' %} + mode tcp + balance source + 
hash-type consistent +{% if default_server_opts %} + default-server {{ default_server_opts|join(" ") }} +{% endif %} + {% for server in servers %} + server {{ server.name }} {{ server.ip }}:{{ server.port }} + {% endfor %} +{% endif %} diff --git a/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 b/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 new file mode 100644 index 000000000..e19f556c6 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 @@ -0,0 +1,36 @@ +# {{ cephadm_managed }} +vrrp_script check_backend { + script "{{ script }}" + weight -20 + interval 2 + rise 2 + fall 2 +} + +{% for x in range(virtual_ips|length) %} +vrrp_instance VI_{{ x }} { + state {{ states[x] }} + priority {{ priorities[x] }} + interface {{ vrrp_interfaces[x] }} + virtual_router_id {{ first_virtual_router_id + x }} + advert_int 1 + authentication { + auth_type PASS + auth_pass {{ password }} + } +{% if not spec.use_keepalived_multicast %} + unicast_src_ip {{ host_ips[x] }} + unicast_peer { + {% for ip in other_ips[x] %} + {{ ip }} + {% endfor %} + } +{% endif %} + virtual_ipaddress { + {{ virtual_ips[x] }} dev {{ interfaces[x] }} + } + track_script { + check_backend + } +} +{% endfor %} diff --git a/src/pybind/mgr/cephadm/templates/services/iscsi/iscsi-gateway.cfg.j2 b/src/pybind/mgr/cephadm/templates/services/iscsi/iscsi-gateway.cfg.j2 new file mode 100644 index 000000000..c2582ace7 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/iscsi/iscsi-gateway.cfg.j2 @@ -0,0 +1,13 @@ +# {{ cephadm_managed }} +[config] +cluster_client_name = {{ client_name }} +pool = {{ spec.pool }} +trusted_ip_list = {{ trusted_ip_list|default("''", true) }} +minimum_gateways = 1 +api_port = {{ spec.api_port|default("''", true) }} +api_user = {{ spec.api_user|default("''", true) }} +api_password = {{ spec.api_password|default("''", true) }} +api_secure = {{ spec.api_secure|default('False', true) }} +log_to_stderr = True +log_to_stderr_prefix = debug +log_to_file = False diff --git a/src/pybind/mgr/cephadm/templates/services/loki.yml.j2 b/src/pybind/mgr/cephadm/templates/services/loki.yml.j2 new file mode 100644 index 000000000..271437231 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/loki.yml.j2 @@ -0,0 +1,28 @@ +# {{ cephadm_managed }} +auth_enabled: false + +server: + http_listen_port: 3100 + grpc_listen_port: 8080 + +common: + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h diff --git a/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 b/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 new file mode 100644 index 000000000..ab8df7192 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 @@ -0,0 +1,38 @@ +# {{ cephadm_managed }} +NFS_CORE_PARAM { + Enable_NLM = false; + Enable_RQUOTA = false; + Protocols = 4; + NFS_Port = {{ port }}; +{% if bind_addr %} + Bind_addr = {{ bind_addr }}; +{% endif %} +{% if haproxy_hosts %} + HAProxy_Hosts = {{ haproxy_hosts|join(", ") }}; +{% endif %} +} + +NFSv4 { + Delegations = false; + RecoveryBackend = 'rados_cluster'; + Minor_Versions = 1, 2; +} + +RADOS_KV { + UserId = "{{ user }}"; + nodeid = "{{ nodeid }}"; + pool = "{{ pool 
}}"; + namespace = "{{ namespace }}"; +} + +RADOS_URLS { + UserId = "{{ user }}"; + watch_url = "{{ url }}"; +} + +RGW { + cluster = "ceph"; + name = "client.{{ rgw_user }}"; +} + +%url {{ url }} diff --git a/src/pybind/mgr/cephadm/templates/services/node-exporter/web.yml.j2 b/src/pybind/mgr/cephadm/templates/services/node-exporter/web.yml.j2 new file mode 100644 index 000000000..1c1220345 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/node-exporter/web.yml.j2 @@ -0,0 +1,3 @@ +tls_server_config: + cert_file: node_exporter.crt + key_file: node_exporter.key diff --git a/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 b/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 new file mode 100644 index 000000000..69b8332cd --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 @@ -0,0 +1,34 @@ +# {{ cephadm_managed }} +[gateway] +name = {{ name }} +group = {{ spec.group }} +addr = {{ addr }} +port = {{ port }} +enable_auth = {{ spec.enable_auth }} +state_update_notify = True +state_update_interval_sec = 5 + +[ceph] +pool = {{ spec.pool }} +config_file = /etc/ceph/ceph.conf +id = {{ rados_id }} + +[mtls] +server_key = {{ spec.server_key }} +client_key = {{ spec.client_key }} +server_cert = {{ spec.server_cert }} +client_cert = {{ spec.client_cert }} + +[spdk] +tgt_path = {{ spec.tgt_path }} +rpc_socket = {{ rpc_socket }} +timeout = {{ spec.timeout }} +log_level = {{ log_level }} +conn_retries = {{ spec.conn_retries }} +transports = {{ spec.transports }} +{% if transport_tcp_options %} +transport_tcp_options = {{ transport_tcp_options }} +{% endif %} +{% if spec.tgt_cmd_extra_args %} +tgt_cmd_extra_args = {{ spec.tgt_cmd_extra_args }} +{% endif %} diff --git a/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2 b/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2 new file mode 100644 index 000000000..b56843994 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2 @@ -0,0 +1,109 @@ +# {{ cephadm_managed }} +global: + scrape_interval: 10s + evaluation_interval: 10s +rule_files: + - /etc/prometheus/alerting/* + +{% if alertmanager_sd_url %} +alerting: + alertmanagers: +{% if secure_monitoring_stack %} + - scheme: https + basic_auth: + username: {{ alertmanager_web_user }} + password: {{ alertmanager_web_password }} + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: {{ alertmanager_sd_url }} + basic_auth: + username: {{ service_discovery_username }} + password: {{ service_discovery_password }} + tls_config: + ca_file: root_cert.pem +{% else %} + - scheme: http + http_sd_configs: + - url: {{ alertmanager_sd_url }} +{% endif %} +{% endif %} + +scrape_configs: + - job_name: 'ceph' +{% if secure_monitoring_stack %} + scheme: https + tls_config: + ca_file: mgr_prometheus_cert.pem + honor_labels: true + http_sd_configs: + - url: {{ mgr_prometheus_sd_url }} + basic_auth: + username: {{ service_discovery_username }} + password: {{ service_discovery_password }} + tls_config: + ca_file: root_cert.pem +{% else %} + honor_labels: true + http_sd_configs: + - url: {{ mgr_prometheus_sd_url }} +{% endif %} + +{% if node_exporter_sd_url %} + - job_name: 'node' +{% if secure_monitoring_stack %} + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: {{ node_exporter_sd_url }} + basic_auth: + username: {{ service_discovery_username }} + password: {{ service_discovery_password }} + tls_config: + ca_file: root_cert.pem 
+{% else %} + http_sd_configs: + - url: {{ node_exporter_sd_url }} +{% endif %} +{% endif %} + +{% if haproxy_sd_url %} + - job_name: 'haproxy' +{% if secure_monitoring_stack %} + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: {{ haproxy_sd_url }} + basic_auth: + username: {{ service_discovery_username }} + password: {{ service_discovery_password }} + tls_config: + ca_file: root_cert.pem +{% else %} + http_sd_configs: + - url: {{ haproxy_sd_url }} +{% endif %} +{% endif %} + +{% if ceph_exporter_sd_url %} + - job_name: 'ceph-exporter' +{% if secure_monitoring_stack %} + honor_labels: true + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: {{ ceph_exporter_sd_url }} + basic_auth: + username: {{ service_discovery_username }} + password: {{ service_discovery_password }} + tls_config: + ca_file: root_cert.pem +{% else %} + honor_labels: true + http_sd_configs: + - url: {{ ceph_exporter_sd_url }} +{% endif %} +{% endif %} diff --git a/src/pybind/mgr/cephadm/templates/services/prometheus/web.yml.j2 b/src/pybind/mgr/cephadm/templates/services/prometheus/web.yml.j2 new file mode 100644 index 000000000..da3c3d724 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/prometheus/web.yml.j2 @@ -0,0 +1,5 @@ +tls_server_config: + cert_file: prometheus.crt + key_file: prometheus.key +basic_auth_users: + {{ prometheus_web_user }}: {{ prometheus_web_password }} diff --git a/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 b/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 new file mode 100644 index 000000000..5ce7a3103 --- /dev/null +++ b/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 @@ -0,0 +1,17 @@ +# {{ cephadm_managed }} +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://{{ client_hostname }}:3100/loki/api/v1/push + +scrape_configs: +- job_name: system + static_configs: + - labels: + job: Cluster Logs + __path__: /var/log/ceph/**/*.log \ No newline at end of file diff --git a/src/pybind/mgr/cephadm/tests/__init__.py b/src/pybind/mgr/cephadm/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/pybind/mgr/cephadm/tests/conftest.py b/src/pybind/mgr/cephadm/tests/conftest.py new file mode 100644 index 000000000..e8add2c7b --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/conftest.py @@ -0,0 +1,27 @@ +import pytest + +from cephadm.services.osd import RemoveUtil, OSD +from tests import mock + +from .fixtures import with_cephadm_module + + +@pytest.fixture() +def cephadm_module(): + with with_cephadm_module({}) as m: + yield m + + +@pytest.fixture() +def rm_util(): + with with_cephadm_module({}) as m: + r = RemoveUtil.__new__(RemoveUtil) + r.__init__(m) + yield r + + +@pytest.fixture() +def osd_obj(): + with mock.patch("cephadm.services.osd.RemoveUtil"): + o = OSD(0, mock.MagicMock()) + yield o diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py new file mode 100644 index 000000000..6281283d7 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/fixtures.py @@ -0,0 +1,200 @@ +import fnmatch +import asyncio +import sys +from tempfile import NamedTemporaryFile +from contextlib import contextmanager + +from ceph.deployment.service_spec import PlacementSpec, ServiceSpec +from ceph.utils import datetime_to_str, datetime_now +from cephadm.serve import CephadmServe, cephadmNoImage + +try: + from typing import Any, Iterator, List, Callable, Dict +except ImportError: + 
pass + +from cephadm import CephadmOrchestrator +from orchestrator import raise_if_exception, OrchResult, HostSpec, DaemonDescriptionStatus +from tests import mock + + +def async_side_effect(result): + async def side_effect(*args, **kwargs): + return result + return side_effect + + +def get_ceph_option(_, key): + return __file__ + + +def get_module_option_ex(_, module, key, default=None): + if module == 'prometheus': + if key == 'server_port': + return 9283 + return None + + +def _run_cephadm(ret): + async def foo(s, host, entity, cmd, e, **kwargs): + if cmd == 'gather-facts': + return '{}', '', 0 + return [ret], '', 0 + return foo + + +def match_glob(val, pat): + ok = fnmatch.fnmatchcase(val, pat) + if not ok: + assert pat in val + + +class MockEventLoopThread: + def get_result(self, coro, timeout): + if sys.version_info >= (3, 7): + return asyncio.run(coro) + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + return loop.run_until_complete(coro) + finally: + loop.close() + asyncio.set_event_loop(None) + + +def receive_agent_metadata(m: CephadmOrchestrator, host: str, ops: List[str] = None) -> None: + to_update: Dict[str, Callable[[str, Any], None]] = { + 'ls': m._process_ls_output, + 'gather-facts': m.cache.update_host_facts, + 'list-networks': m.cache.update_host_networks, + } + if ops: + for op in ops: + out = m.wait_async(CephadmServe(m)._run_cephadm_json(host, cephadmNoImage, op, [])) + to_update[op](host, out) + m.cache.last_daemon_update[host] = datetime_now() + m.cache.last_facts_update[host] = datetime_now() + m.cache.last_network_update[host] = datetime_now() + m.cache.metadata_up_to_date[host] = True + + +def receive_agent_metadata_all_hosts(m: CephadmOrchestrator) -> None: + for host in m.cache.get_hosts(): + receive_agent_metadata(m, host) + + +@contextmanager +def with_cephadm_module(module_options=None, store=None): + """ + :param module_options: Set opts as if they were set before module.__init__ is called + :param store: Set the store before module.__init__ is called + """ + with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option), \ + mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \ + mock.patch('cephadm.module.CephadmOrchestrator.get_module_option_ex', get_module_option_ex), \ + mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \ + mock.patch("cephadm.module.CephadmOrchestrator.remote"), \ + mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \ + mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \ + mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \ + mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'), \ + mock.patch('cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles'), \ + mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'), \ + mock.patch('cephadm.http_server.CephadmHttpServer.run'): + + m = CephadmOrchestrator.__new__(CephadmOrchestrator) + if module_options is not None: + for k, v in module_options.items(): + m._ceph_set_module_option('cephadm', k, v) + if store is None: + store = {} + if '_ceph_get/mon_map' not in store: + m.mock_store_set('_ceph_get', 'mon_map', { + 'modified': datetime_to_str(datetime_now()), + 'fsid': 'foobar', + }) + if '_ceph_get/mgr_map' not in store: + m.mock_store_set('_ceph_get', 'mgr_map', { + 'services': { + 'dashboard': 'http://[::1]:8080', + 'prometheus': 'http://[::1]:8081' + }, + 'modules': ['dashboard', 'prometheus'], + }) + for k, v in 
store.items():
+                m._ceph_set_store(k, v)
+
+        m.__init__('cephadm', 0, 0)
+        m._cluster_fsid = "fsid"
+
+        m.event_loop = MockEventLoopThread()
+        m.tkey = NamedTemporaryFile(prefix='test-cephadm-identity-')
+
+        yield m
+
+
+def wait(m: CephadmOrchestrator, c: OrchResult) -> Any:
+    return raise_if_exception(c)
+
+
+@contextmanager
+def with_host(m: CephadmOrchestrator, name, addr='1::4', refresh_hosts=True, rm_with_force=True):
+    with mock.patch("cephadm.utils.resolve_ip", return_value=addr):
+        wait(m, m.add_host(HostSpec(hostname=name)))
+        if refresh_hosts:
+            CephadmServe(m)._refresh_hosts_and_daemons()
+            receive_agent_metadata(m, name)
+        yield
+        wait(m, m.remove_host(name, force=rm_with_force))
+
+
+def assert_rm_service(cephadm: CephadmOrchestrator, srv_name):
+    mon_or_mgr = cephadm.spec_store[srv_name].spec.service_type in ('mon', 'mgr')
+    if mon_or_mgr:
+        assert 'Unable' in wait(cephadm, cephadm.remove_service(srv_name))
+        return
+    assert wait(cephadm, cephadm.remove_service(srv_name)) == f'Removed service {srv_name}'
+    assert cephadm.spec_store[srv_name].deleted is not None
+    CephadmServe(cephadm)._check_daemons()
+    CephadmServe(cephadm)._apply_all_services()
+    assert cephadm.spec_store[srv_name].deleted
+    unmanaged = cephadm.spec_store[srv_name].spec.unmanaged
+    CephadmServe(cephadm)._purge_deleted_services()
+    if not unmanaged:  # because in that case we're not deleting daemons
+        assert srv_name not in cephadm.spec_store, f'{cephadm.spec_store[srv_name]!r}'
+
+
+@contextmanager
+def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth=None, host: str = '', status_running=False) -> Iterator[List[str]]:
+    if spec.placement.is_empty() and host:
+        spec.placement = PlacementSpec(hosts=[host], count=1)
+    if meth is not None:
+        c = meth(cephadm_module, spec)
+        assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
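+        # (note: meth-style appliers such as CephadmOrchestrator.apply_mon
+        # return a single status string, while the generic apply() used in the
+        # else branch below returns a list of status strings)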
+ else: + c = cephadm_module.apply([spec]) + assert wait(cephadm_module, c) == [f'Scheduled {spec.service_name()} update...'] + + specs = [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] + assert spec in specs + + CephadmServe(cephadm_module)._apply_all_services() + + if status_running: + make_daemons_running(cephadm_module, spec.service_name()) + + dds = wait(cephadm_module, cephadm_module.list_daemons()) + own_dds = [dd for dd in dds if dd.service_name() == spec.service_name()] + if host and spec.service_type != 'osd': + assert own_dds + + yield [dd.name() for dd in own_dds] + + assert_rm_service(cephadm_module, spec.service_name()) + + +def make_daemons_running(cephadm_module, service_name): + own_dds = cephadm_module.cache.get_daemons_by_service(service_name) + for dd in own_dds: + dd.status = DaemonDescriptionStatus.running # We're changing the reference diff --git a/src/pybind/mgr/cephadm/tests/test_autotune.py b/src/pybind/mgr/cephadm/tests/test_autotune.py new file mode 100644 index 000000000..524da9c00 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_autotune.py @@ -0,0 +1,69 @@ +# Disable autopep8 for this file: + +# fmt: off + +import pytest + +from cephadm.autotune import MemoryAutotuner +from orchestrator import DaemonDescription + + +@pytest.mark.parametrize("total,daemons,config,result", + [ # noqa: E128 + ( + 128 * 1024 * 1024 * 1024, + [], + {}, + None, + ), + ( + 128 * 1024 * 1024 * 1024, + [ + DaemonDescription('osd', '1', 'host1'), + DaemonDescription('osd', '2', 'host1'), + ], + {}, + 64 * 1024 * 1024 * 1024, + ), + ( + 128 * 1024 * 1024 * 1024, + [ + DaemonDescription('osd', '1', 'host1'), + DaemonDescription('osd', '2', 'host1'), + DaemonDescription('osd', '3', 'host1'), + ], + { + 'osd.3': 16 * 1024 * 1024 * 1024, + }, + 56 * 1024 * 1024 * 1024, + ), + ( + 128 * 1024 * 1024 * 1024, + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('osd', '1', 'host1'), + DaemonDescription('osd', '2', 'host1'), + ], + {}, + 62 * 1024 * 1024 * 1024, + ) + ]) +def test_autotune(total, daemons, config, result): + def fake_getter(who, opt): + if opt == 'osd_memory_target_autotune': + if who in config: + return False + else: + return True + if opt == 'osd_memory_target': + return config.get(who, 4 * 1024 * 1024 * 1024) + if opt == 'mds_cache_memory_limit': + return 16 * 1024 * 1024 * 1024 + + a = MemoryAutotuner( + total_mem=total, + daemons=daemons, + config_get=fake_getter, + ) + val, osds = a.tune() + assert val == result diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py new file mode 100644 index 000000000..24fcb0280 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py @@ -0,0 +1,2709 @@ +import asyncio +import json +import logging + +from contextlib import contextmanager + +import pytest + +from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection +from cephadm.serve import CephadmServe +from cephadm.inventory import HostCacheStatus, ClientKeyringSpec +from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims +from cephadm.utils import SpecialHostLabels + +try: + from typing import List +except ImportError: + pass + +from ceph.deployment.service_spec import ( + CustomConfig, + CustomContainerSpec, + HostPlacementSpec, + IscsiServiceSpec, + MDSSpec, + NFSServiceSpec, + PlacementSpec, + RGWSpec, + ServiceSpec, +) +from ceph.deployment.drive_selection.selector import DriveSelection +from ceph.deployment.inventory import Devices, Device +from ceph.utils 
import datetime_to_str, datetime_now, str_to_datetime
+from orchestrator import DaemonDescription, InventoryHost, \
+    HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
+from tests import mock
+from .fixtures import wait, _run_cephadm, match_glob, with_host, \
+    with_cephadm_module, with_service, make_daemons_running, async_side_effect
+from cephadm.module import CephadmOrchestrator
+
+"""
+TODOs:
+    There is really room for improvement here. I just quickly assembled these tests.
+    In general, everything should be tested in Teuthology as well. The reason for
+    also testing here is the shorter development round-trip time.
+"""
+
+
+def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
+    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
+    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
+    assert d_names
+    # there should only be one daemon (if not, match_glob will throw a mismatch)
+    assert len(d_names) == 1
+
+    c = cephadm.remove_daemons(d_names)
+    [out] = wait(cephadm, c)
+    # picking the 1st element is needed, rather than passing the list, when the daemon
+    # name contains a '-' char. Otherwise the '-' is treated as a range, i.e. cephadm-exporter
+    # is treated like an m-e range, which is invalid. rbd-mirror (d-m) and node-exporter (e-e)
+    # are valid, so they pass without incident. Also, match_glob acts on strings anyway.
+    match_glob(out, f"Removed {d_names[0]}* from host '{host}'")
+
+
+@contextmanager
+def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
+    spec.placement = PlacementSpec(hosts=[host], count=1)
+
+    c = cephadm_module.add_daemon(spec)
+    [out] = wait(cephadm_module, c)
+    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")
+
+    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
+    for dd in dds:
+        if dd.hostname == host:
+            yield dd.daemon_id
+            assert_rm_daemon(cephadm_module, spec.service_name(), host)
+            return
+
+    assert False, 'Daemon not found'
+
+
+@contextmanager
+def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str, osd_id: int, ceph_volume_lvm_list=None):
+    cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+        'osds': [
+            {
+                'osd': 1,
+                'up_from': 0,
+                'up': True,
+                'uuid': 'uuid'
+            }
+        ]
+    })
+
+    _run_cephadm.reset_mock(return_value=True, side_effect=True)
+    if ceph_volume_lvm_list:
+        _run_cephadm.side_effect = ceph_volume_lvm_list
+    else:
+        async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
+            logging.info(f'ceph-volume cmd: {cmd}')
+            if 'raw' in cmd:
+                return json.dumps({
+                    "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
+                        "ceph_fsid": cephadm_module._cluster_fsid,
+                        "device": "/dev/loop0",
+                        "osd_id": 21,
+                        "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
+                        "type": "bluestore"
+                    },
+                }), '', 0
+            if 'lvm' in cmd:
+                return json.dumps({
+                    str(osd_id): [{
+                        'tags': {
+                            'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+                            'ceph.osd_fsid': 'uuid'
+                        },
+                        'type': 'data'
+                    }]
+                }), '', 0
+            return '{}', '', 0
+
+        _run_cephadm.side_effect = _ceph_volume_list
+
+    assert cephadm_module._osd_activate(
+        [host]).stdout == f"Created osd(s) 1 on host '{host}'"
+    assert _run_cephadm.mock_calls == [
+        mock.call(host, 'osd', 'ceph-volume',
+                  ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
+        mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY),
+        mock.call(host, 'osd', 'ceph-volume',
+                  ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, 
error_ok=False, image='', log_output=True), + ] + dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host) + assert dd.name() == f'osd.{osd_id}' + yield dd + cephadm_module._remove_daemons([(f'osd.{osd_id}', host)]) + + +class TestCephadm(object): + + def test_get_unique_name(self, cephadm_module): + # type: (CephadmOrchestrator) -> None + existing = [ + DaemonDescription(daemon_type='mon', daemon_id='a') + ] + new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing) + match_glob(new_mon, 'myhost') + new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing) + match_glob(new_mgr, 'myhost.*') + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_host(self, cephadm_module): + assert wait(cephadm_module, cephadm_module.get_hosts()) == [] + with with_host(cephadm_module, 'test'): + assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')] + + # Be careful with backward compatibility when changing things here: + assert json.loads(cephadm_module.get_store('inventory')) == \ + {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}} + + with with_host(cephadm_module, 'second', '1.2.3.5'): + assert wait(cephadm_module, cephadm_module.get_hosts()) == [ + HostSpec('test', '1::4'), + HostSpec('second', '1.2.3.5') + ] + + assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')] + assert wait(cephadm_module, cephadm_module.get_hosts()) == [] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + @mock.patch("cephadm.utils.resolve_ip") + def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module): + resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1'] + assert wait(cephadm_module, cephadm_module.get_hosts()) == [] + cephadm_module._add_host(HostSpec('test', '192.168.122.1')) + assert wait(cephadm_module, cephadm_module.get_hosts()) == [ + HostSpec('test', '192.168.122.1')] + cephadm_module._add_host(HostSpec('test')) + assert wait(cephadm_module, cephadm_module.get_hosts()) == [ + HostSpec('test', '192.168.122.1')] + with pytest.raises(OrchestratorError): + cephadm_module._add_host(HostSpec('test2')) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_service_ls(self, cephadm_module): + with with_host(cephadm_module, 'test'): + c = cephadm_module.list_daemons(refresh=True) + assert wait(cephadm_module, c) == [] + with with_service(cephadm_module, MDSSpec('mds', 'name', unmanaged=True)) as _, \ + with_daemon(cephadm_module, MDSSpec('mds', 'name'), 'test') as _: + + c = cephadm_module.list_daemons() + + def remove_id_events(dd): + out = dd.to_json() + del out['daemon_id'] + del out['events'] + del out['daemon_name'] + return out + + assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [ + { + 'service_name': 'mds.name', + 'daemon_type': 'mds', + 'hostname': 'test', + 'status': 2, + 'status_desc': 'starting', + 'is_active': False, + 'ports': [], + } + ] + + with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'), + CephadmOrchestrator.apply_rgw, 'test', status_running=True): + make_daemons_running(cephadm_module, 'mds.name') + + c = cephadm_module.describe_service() + out = [dict(o.to_json()) for o in wait(cephadm_module, c)] + expected = [ + { + 'placement': {'count': 2}, + 'service_id': 'name', + 'service_name': 'mds.name', + 'service_type': 'mds', + 'status': {'created': mock.ANY, 'running': 1, 'size': 2}, + 'unmanaged': True + }, + { + 'placement': { + 
'count': 1,
+                    'hosts': ["test"]
+                },
+                'service_id': 'r.z',
+                'service_name': 'rgw.r.z',
+                'service_type': 'rgw',
+                'status': {'created': mock.ANY, 'running': 1, 'size': 1,
+                           'ports': [80]},
+            }
+        ]
+        for o in out:
+            if 'events' in o:
+                del o['events']  # delete it, as it contains a timestamp
+        assert out == expected
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_service_ls_service_type_flag(self, cephadm_module):
+        with with_host(cephadm_module, 'host1'):
+            with with_host(cephadm_module, 'host2'):
+                with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
+                                  CephadmOrchestrator.apply_mgr, '', status_running=True):
+                    with with_service(cephadm_module, MDSSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
+                                      CephadmOrchestrator.apply_mds, '', status_running=True):
+
+                        # with no service-type. Should provide info for both services
+                        c = cephadm_module.describe_service()
+                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
+                        expected = [
+                            {
+                                'placement': {'count': 2},
+                                'service_name': 'mgr',
+                                'service_type': 'mgr',
+                                'status': {'created': mock.ANY,
+                                           'running': 2,
+                                           'size': 2}
+                            },
+                            {
+                                'placement': {'count': 2},
+                                'service_id': 'test-id',
+                                'service_name': 'mds.test-id',
+                                'service_type': 'mds',
+                                'status': {'created': mock.ANY,
+                                           'running': 2,
+                                           'size': 2}
+                            },
+                        ]
+
+                        for o in out:
+                            if 'events' in o:
+                                del o['events']  # delete it, as it contains a timestamp
+                        assert out == expected
+
+                        # with service-type. Should provide info for only mds
+                        c = cephadm_module.describe_service(service_type='mds')
+                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
+                        expected = [
+                            {
+                                'placement': {'count': 2},
+                                'service_id': 'test-id',
+                                'service_name': 'mds.test-id',
+                                'service_type': 'mds',
+                                'status': {'created': mock.ANY,
+                                           'running': 2,
+                                           'size': 2}
+                            },
+                        ]
+
+                        for o in out:
+                            if 'events' in o:
+                                del o['events']  # delete it, as it contains a timestamp
+                        assert out == expected
+
+                        # service-type should not match service names
+                        c = cephadm_module.describe_service(service_type='mds.test-id')
+                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
+                        assert out == []
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_device_ls(self, cephadm_module):
+        with with_host(cephadm_module, 'test'):
+            c = cephadm_module.get_inventory()
+            assert wait(cephadm_module, c) == [InventoryHost('test')]
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
+        json.dumps([
+            dict(
+                name='rgw.myrgw.foobar',
+                style='cephadm',
+                fsid='fsid',
+                container_id='container_id',
+                version='version',
+                state='running',
+            ),
+            dict(
+                name='something.foo.bar',
+                style='cephadm',
+                fsid='fsid',
+            ),
+            dict(
+                name='haproxy.test.bar',
+                style='cephadm',
+                fsid='fsid',
+            ),
+
+        ])
+    ))
+    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
+        cephadm_module.service_cache_timeout = 10
+        with with_host(cephadm_module, 'test'):
+            CephadmServe(cephadm_module)._refresh_host_daemons('test')
+            dds = wait(cephadm_module, cephadm_module.list_daemons())
+            assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
+        cephadm_module.service_cache_timeout = 10
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id: + + d_name = 'rgw.' + daemon_id + + c = cephadm_module.daemon_action('redeploy', d_name) + assert wait(cephadm_module, + c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'" + + for what in ('start', 'stop', 'restart'): + c = cephadm_module.daemon_action(what, d_name) + assert wait(cephadm_module, + c) == F"Scheduled to {what} {d_name} on host 'test'" + + # Make sure, _check_daemons does a redeploy due to monmap change: + cephadm_module._store['_ceph_get/mon_map'] = { + 'modified': datetime_to_str(datetime_now()), + 'fsid': 'foobar', + } + cephadm_module.notify('mon_map', None) + + CephadmServe(cephadm_module)._check_daemons() + + assert cephadm_module.events.get_for_daemon(d_name) == [ + OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO', + f"Deployed {d_name} on host \'test\'"), + OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO', + f"stop {d_name} from host \'test\'"), + ] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator): + cephadm_module.service_cache_timeout = 10 + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \ + with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id: + with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command: + + _ceph_send_command.side_effect = Exception("myerror") + + # Make sure, _check_daemons does a redeploy due to monmap change: + cephadm_module.mock_store_set('_ceph_get', 'mon_map', { + 'modified': datetime_to_str(datetime_now()), + 'fsid': 'foobar', + }) + cephadm_module.notify('mon_map', None) + + CephadmServe(cephadm_module)._check_daemons() + + evs = [e.message for e in cephadm_module.events.get_for_daemon( + f'rgw.{daemon_id}')] + + assert 'myerror' in ''.join(evs) + + @pytest.mark.parametrize( + "action", + [ + 'start', + 'stop', + 'restart', + 'reconfig', + 'redeploy' + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("cephadm.module.HostCache.save_host") + def test_daemon_check(self, _save_host, cephadm_module: CephadmOrchestrator, action): + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names: + [daemon_name] = d_names + + cephadm_module._schedule_daemon_action(daemon_name, action) + + assert cephadm_module.cache.get_scheduled_daemon_action( + 'test', daemon_name) == action + + CephadmServe(cephadm_module)._check_daemons() + + assert _save_host.called_with('test') + assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + + # Also testing deploying mons without explicit network placement + cephadm_module.check_mon_command({ + 'prefix': 'config set', + 'who': 'mon', + 'name': 'public_network', + 'value': '127.0.0.0/8' + }) + + cephadm_module.cache.update_host_networks( + 'test', + { + "127.0.0.0/8": [ + "127.0.0.1" + ], + } + ) + + with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names: + [daemon_name] 
= d_names + + cephadm_module._set_extra_ceph_conf('[mon]\nk=v') + + CephadmServe(cephadm_module)._check_daemons() + + _run_cephadm.assert_called_with( + 'test', + 'mon.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "mon.test", + "image": '', + "deploy_arguments": [], + "params": { + 'reconfig': True, + }, + "meta": { + 'service_name': 'mon', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "[mon]\nk=v\n[mon.test]\npublic network = 127.0.0.0/8\n", + "keyring": "", + "files": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n" + }, + }, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + cephadm_module.check_mon_command({ + 'prefix': 'config set', + 'who': 'mon', + 'name': 'public_network', + 'value': '127.0.0.0/8' + }) + + cephadm_module.cache.update_host_networks( + 'test', + { + "127.0.0.0/8": [ + "127.0.0.1" + ], + } + ) + + with with_service(cephadm_module, ServiceSpec(service_type='mon', crush_locations={'test': ['datacenter=a', 'rack=2']}), CephadmOrchestrator.apply_mon, 'test'): + _run_cephadm.assert_called_with( + 'test', + 'mon.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "mon.test", + "image": '', + "deploy_arguments": [], + "params": {}, + "meta": { + 'service_name': 'mon', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n", + "keyring": "", + "files": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n", + }, + "crush_location": "datacenter=a", + }, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash): + _run_cephadm.assert_called_with( + 'test', + 'crash.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "crash.test", + "image": '', + "deploy_arguments": [], + "params": { + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + }, + "meta": { + 'service_name': 'crash', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "", + "keyring": "[client.crash.test]\nkey = None\n", + }, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_extra_entrypoint_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='node-exporter', + extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg']), + CephadmOrchestrator.apply_node_exporter): + 
_run_cephadm.assert_called_with( + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "config_blobs": {}, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_extra_entrypoint_and_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='node-exporter', + extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg'], + extra_container_args=['--cpus=2', '--quiet']), + CephadmOrchestrator.apply_node_exporter): + _run_cephadm.assert_called_with( + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "config_blobs": {}, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_extra_entrypoint_and_container_args_with_spaces(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='node-exporter', + extra_entrypoint_args=['--entrypoint-arg-with-value value', '--some-other-arg 3'], + extra_container_args=['--cpus 2', '--container-arg-with-value value']), + CephadmOrchestrator.apply_node_exporter): + _run_cephadm.assert_called_with( + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_container_args': [ + "--cpus", + "2", + "--container-arg-with-value", + "value", + ], + 'extra_entrypoint_args': [ + "--entrypoint-arg-with-value", + "value", + "--some-other-arg", + "3", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus 2", + "--container-arg-with-value value", + ], + 'extra_entrypoint_args': [ + "--entrypoint-arg-with-value value", + "--some-other-arg 3", + ], + }, + "config_blobs": {}, + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + 
def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + test_cert = ['-----BEGIN PRIVATE KEY-----', + 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg', + 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=', + '-----END PRIVATE KEY-----', + '-----BEGIN CERTIFICATE-----', + 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg', + 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=', + '-----END CERTIFICATE-----'] + configs = [ + CustomConfig(content='something something something', + mount_path='/etc/test.conf'), + CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt') + ] + tc_joined = '\n'.join(test_cert) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash): + _run_cephadm( + 'test', + 'crash.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "crash.test", + "image": "", + "deploy_arguments": [], + "params": {}, + "meta": { + "service_name": "crash", + "ports": [], + "ip": None, + "deployed_by": [], + "rank": None, + "rank_generation": None, + "extra_container_args": None, + "extra_entrypoint_args": None, + }, + "config_blobs": { + "config": "", + "keyring": "[client.crash.test]\nkey = None\n", + "custom_config_files": [ + { + "content": "something something something", + "mount_path": "/etc/test.conf", + }, + { + "content": tc_joined, + "mount_path": "/usr/share/grafana/thing.crt", + }, + ] + } + }), + ) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'): + + # Make sure, _check_daemons does a redeploy due to monmap change: + cephadm_module.mock_store_set('_ceph_get', 'mon_map', { + 'modified': datetime_to_str(datetime_now()), + 'fsid': 'foobar', + }) + cephadm_module.notify('mon_map', None) + cephadm_module.mock_store_set('_ceph_get', 'mgr_map', { + 'modules': ['dashboard'] + }) + + with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd: + CephadmServe(cephadm_module)._check_daemons() + _mon_cmd.assert_any_call( + {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'}, + None) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4') + def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator): + # https://tracker.ceph.com/issues/52866 + with with_host(cephadm_module, 'test1'): + with with_host(cephadm_module, 'test2'): + with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'): + + CephadmServe(cephadm_module)._apply_all_services() + assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2 + + # get a daemons from postaction list (ARRGH sets!!) 
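+                    # (note: set.pop() returns elements in arbitrary order, so
+                    # tempdaemon1/tempdaemon2 may come back in either order; the
+                    # host lookup further down accounts for that)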
+                    tempset = cephadm_module.requires_post_actions.copy()
+                    tempdaemon1 = tempset.pop()
+                    tempdaemon2 = tempset.pop()
+
+                    # make sure post actions has 2 daemons in it
+                    assert len(cephadm_module.requires_post_actions) == 2
+
+                    # replicate a host cache that is not in sync when check_daemons is called
+                    tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1)
+                    tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2)
+                    host = 'test1'
+                    if 'test1' not in tempdaemon1:
+                        host = 'test2'
+                    cephadm_module.cache.rm_daemon(host, tempdaemon1)
+
+                    # Make sure, _check_daemons does a redeploy due to monmap change:
+                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
+                        'modified': datetime_to_str(datetime_now()),
+                        'fsid': 'foobar',
+                    })
+                    cephadm_module.notify('mon_map', None)
+                    cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
+                        'modules': ['dashboard']
+                    })
+
+                    with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
+                        CephadmServe(cephadm_module)._check_daemons()
+                        _cfg_db.assert_called_once_with([tempdd2])
+
+                        # post actions still has the other daemon in it and will run next _check_daemons
+                        assert len(cephadm_module.requires_post_actions) == 1
+
+                        # post actions was missed for a daemon
+                        assert tempdaemon1 in cephadm_module.requires_post_actions
+
+                        # put the daemon back in the cache
+                        cephadm_module.cache.add_daemon(host, tempdd1)
+
+                        _cfg_db.reset_mock()
+                        # replicate serve loop running again
+                        CephadmServe(cephadm_module)._check_daemons()
+
+                        # post actions should have been called again
+                        _cfg_db.assert_called()
+
+                        # post actions is now empty
+                        assert len(cephadm_module.requires_post_actions) == 0
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_mon_add(self, cephadm_module):
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
+                ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
+                c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
+                assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]
+
+                with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
+                    ps = PlacementSpec(hosts=['test'], count=1)
+                    c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
+                    wait(cephadm_module, c)
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_mgr_update(self, cephadm_module):
+        with with_host(cephadm_module, 'test'):
+            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
+            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
+            assert r
+
+            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')
+
+    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
+    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
+        dict_out = {
+            "nodes": [
+                {
+                    "id": -1,
+                    "name": "default",
+                    "type": "root",
+                    "type_id": 11,
+                    "children": [
+                        -3
+                    ]
+                },
+                {
+                    "id": -3,
+                    "name": "host1",
+                    "type": "host",
+                    "type_id": 1,
+                    "pool_weights": {},
+                    "children": [
+                        0
+                    ]
+                },
+                {
+                    "id": 0,
+                    "device_class": "hdd",
+                    "name": "osd.0",
+                    "type": "osd",
+                    "type_id": 0,
+                    "crush_weight": 0.0243988037109375,
+                    "depth": 2,
+                    "pool_weights": {},
+                    "exists": 1,
+                    "status": "destroyed",
+                    "reweight": 1,
+                    "primary_affinity": 1
+                }
+            ],
+            "stray": []
+        }
+        json_out = json.dumps(dict_out)
+        _mon_cmd.return_value = (0, json_out, '')
+        osd_claims = OsdIdClaims(cephadm_module)
+        assert osd_claims.get() == {'host1': ['0']}
+        assert 
osd_claims.filtered_by_host('host1') == ['0'] + assert osd_claims.filtered_by_host('host1.domain.com') == ['0'] + + @ pytest.mark.parametrize( + "ceph_services, cephadm_daemons, strays_expected, metadata", + # [ ([(daemon_type, daemon_id), ... ], [...], [...]), ... ] + [ + ( + [('mds', 'a'), ('osd', '0'), ('mgr', 'x')], + [], + [('mds', 'a'), ('osd', '0'), ('mgr', 'x')], + {}, + ), + ( + [('mds', 'a'), ('osd', '0'), ('mgr', 'x')], + [('mds', 'a'), ('osd', '0'), ('mgr', 'x')], + [], + {}, + ), + ( + [('mds', 'a'), ('osd', '0'), ('mgr', 'x')], + [('mds', 'a'), ('osd', '0')], + [('mgr', 'x')], + {}, + ), + # https://tracker.ceph.com/issues/49573 + ( + [('rgw-nfs', '14649')], + [], + [('nfs', 'foo-rgw.host1')], + {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}}, + ), + ( + [('rgw-nfs', '14649'), ('rgw-nfs', '14650')], + [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')], + [], + {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}}, + ), + ( + [('rgw-nfs', '14649'), ('rgw-nfs', '14650')], + [('nfs', 'foo-rgw.host1')], + [('nfs', 'foo2.host2')], + {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}}, + ), + ] + ) + def test_check_for_stray_daemons( + self, + cephadm_module, + ceph_services, + cephadm_daemons, + strays_expected, + metadata + ): + # mock ceph service-map + services = [] + for service in ceph_services: + s = {'type': service[0], 'id': service[1]} + services.append(s) + ls = [{'hostname': 'host1', 'services': services}] + + with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers: + list_servers.return_value = ls + list_servers.__iter__.side_effect = ls.__iter__ + + # populate cephadm daemon cache + dm = {} + for daemon_type, daemon_id in cephadm_daemons: + dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id) + dm[dd.name()] = dd + cephadm_module.cache.update_host_daemons('host1', dm) + + def get_metadata_mock(svc_type, svc_id, default): + return metadata[svc_id] + + with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock): + + # test + CephadmServe(cephadm_module)._check_for_strays() + + # verify + strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON') + if not strays: + assert len(strays_expected) == 0 + else: + for dt, di in strays_expected: + name = '%s.%s' % (dt, di) + for detail in strays['detail']: + if name in detail: + strays['detail'].remove(detail) + break + assert name in detail + assert len(strays['detail']) == 0 + assert strays['count'] == len(strays_expected) + + @mock.patch("cephadm.module.CephadmOrchestrator.mon_command") + def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module): + _mon_cmd.return_value = (1, "", "fail_msg") + with pytest.raises(OrchestratorError): + OsdIdClaims(cephadm_module) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + + spec = DriveGroupSpec( + service_id='foo', + placement=PlacementSpec( + host_pattern='*', + ), + data_devices=DeviceSelection( + all=True + ) + ) + + c = cephadm_module.apply([spec]) + assert wait(cephadm_module, c) == ['Scheduled osd.foo update...'] + + inventory = Devices([ + Device( + '/dev/sdb', + available=True + ), + ]) + + cephadm_module.cache.update_host_devices('test', inventory.devices) + + _run_cephadm.side_effect = async_side_effect((['{}'], '', 0)) + + 
assert CephadmServe(cephadm_module)._apply_all_services() is False + + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', + ['--config-json', '-', '--', 'lvm', 'batch', + '--no-auto', '/dev/sdb', '--yes', '--no-systemd'], + env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, + stdin='{"config": "", "keyring": ""}') + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True) + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + + spec = DriveGroupSpec( + service_id='noncollocated', + placement=PlacementSpec( + hosts=['test'] + ), + data_devices=DeviceSelection(paths=['/dev/sdb']), + db_devices=DeviceSelection(paths=['/dev/sdc']), + wal_devices=DeviceSelection(paths=['/dev/sdd']) + ) + + c = cephadm_module.apply([spec]) + assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...'] + + inventory = Devices([ + Device('/dev/sdb', available=True), + Device('/dev/sdc', available=True), + Device('/dev/sdd', available=True) + ]) + + cephadm_module.cache.update_host_devices('test', inventory.devices) + + _run_cephadm.side_effect = async_side_effect((['{}'], '', 0)) + + assert CephadmServe(cephadm_module)._apply_all_services() is False + + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', + ['--config-json', '-', '--', 'lvm', 'batch', + '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc', + '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'], + env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'], + error_ok=True, stdin='{"config": "", "keyring": ""}') + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True) + _run_cephadm.assert_any_call( + 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("cephadm.module.SpecStore.save") + def test_apply_osd_save_placement(self, _save_spec, cephadm_module): + with with_host(cephadm_module, 'test'): + json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, + 'service_id': 'foo', 'data_devices': {'all': True}} + spec = ServiceSpec.from_json(json_spec) + assert isinstance(spec, DriveGroupSpec) + c = cephadm_module.apply([spec]) + assert wait(cephadm_module, c) == ['Scheduled osd.foo update...'] + _save_spec.assert_called_with(spec) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_create_osds(self, cephadm_module): + with with_host(cephadm_module, 'test'): + dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), + data_devices=DeviceSelection(paths=[''])) + c = cephadm_module.create_osds(dg) + out = wait(cephadm_module, c) + assert out == "Created no osd(s) on host test; already created?" 
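+            # (an OSD spec whose placement names a host that is not part of the
+            # cluster must be rejected with a descriptive error, as checked below)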
bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'),
+ data_devices=DeviceSelection(paths=['']))
+ c = cephadm_module.create_osds(bad_dg)
+ out = wait(cephadm_module, c)
+ assert "Invalid 'host:device' spec: host not found in cluster" in out
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ def test_create_noncollocated_osd(self, cephadm_module):
+ with with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+ data_devices=DeviceSelection(paths=['']))
+ c = cephadm_module.create_osds(dg)
+ out = wait(cephadm_module, c)
+ assert out == "Created no osd(s) on host test; already created?"
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
+ @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
+ @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
+ @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
+ def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
+ with with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+ data_devices=DeviceSelection(limit=5, rotational=1),
+ service_id='not_enough')
+
+ disks_found = [
+ '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
+ d_to_cv.return_value = 'foo'
+ _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
+ preview = cephadm_module.osd_service.generate_previews([dg], 'test')
+
+ for osd in preview:
+ assert 'notes' in osd
+ assert osd['notes'] == [
+ 'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ def test_prepare_drivegroup(self, cephadm_module):
+ with with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+ data_devices=DeviceSelection(paths=['']))
+ out = cephadm_module.osd_service.prepare_drivegroup(dg)
+ assert len(out) == 1
+ f1 = out[0]
+ assert f1[0] == 'test'
+ assert isinstance(f1[1], DriveSelection)
+
+ @pytest.mark.parametrize(
+ "devices, preview, exp_commands",
+ [
+ # no preview and only one disk, prepare is used due to the hack that is in place.
+ (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]), + # no preview and multiple disks, uses batch + (['/dev/sda', '/dev/sdb'], False, + ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]), + # preview and only one disk needs to use batch again to generate the preview + (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]), + # preview and multiple disks work the same + (['/dev/sda', '/dev/sdb'], True, + ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]), + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands): + with with_host(cephadm_module, 'test'): + dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec( + host_pattern='test'), data_devices=DeviceSelection(paths=devices)) + ds = DriveSelection(dg, Devices([Device(path) for path in devices])) + preview = preview + out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview) + assert all(any(cmd in exp_cmd for exp_cmd in exp_commands) + for cmd in out), f'Expected cmds from f{out} in {exp_commands}' + + @pytest.mark.parametrize( + "devices, preview, exp_commands", + [ + # one data device, no preview + (['/dev/sda'], False, ["raw prepare --bluestore --data /dev/sda"]), + # multiple data devices, no preview + (['/dev/sda', '/dev/sdb'], False, + ["raw prepare --bluestore --data /dev/sda", "raw prepare --bluestore --data /dev/sdb"]), + # one data device, preview + (['/dev/sda'], True, ["raw prepare --bluestore --data /dev/sda --report --format json"]), + # multiple data devices, preview + (['/dev/sda', '/dev/sdb'], True, + ["raw prepare --bluestore --data /dev/sda --report --format json", "raw prepare --bluestore --data /dev/sdb --report --format json"]), + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands): + with with_host(cephadm_module, 'test'): + dg = DriveGroupSpec(service_id='test.spec', method='raw', placement=PlacementSpec( + host_pattern='test'), data_devices=DeviceSelection(paths=devices)) + ds = DriveSelection(dg, Devices([Device(path) for path in devices])) + preview = preview + out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview) + assert all(any(cmd in exp_cmd for exp_cmd in exp_commands) + for cmd in out), f'Expected cmds from f{out} in {exp_commands}' + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm( + json.dumps([ + dict( + name='osd.0', + style='cephadm', + fsid='fsid', + container_id='container_id', + version='version', + state='running', + ) + ]) + )) + @mock.patch("cephadm.services.osd.OSD.exists", True) + @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0) + def test_remove_osds(self, cephadm_module): + with with_host(cephadm_module, 'test'): + CephadmServe(cephadm_module)._refresh_host_daemons('test') + c = cephadm_module.list_daemons() + wait(cephadm_module, c) + + c = cephadm_module.remove_daemons(['osd.0']) + out = wait(cephadm_module, c) + assert out == ["Removed osd.0 from host 'test'"] + + cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0, + replace=False, + force=False, + hostname='test', + process_started_at=datetime_now(), + 
remove_util=cephadm_module.to_remove_osds.rm_util + )) + cephadm_module.to_remove_osds.process_removal_queue() + assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module) + + c = cephadm_module.remove_osds_status() + out = wait(cephadm_module, c) + assert out == [] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_rgw_update(self, cephadm_module): + with with_host(cephadm_module, 'host1'): + with with_host(cephadm_module, 'host2'): + with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)): + ps = PlacementSpec(hosts=['host1'], count=1) + c = cephadm_module.add_daemon( + RGWSpec(service_id="foo", placement=ps)) + [out] = wait(cephadm_module, c) + match_glob(out, "Deployed rgw.foo.* on host 'host1'") + + ps = PlacementSpec(hosts=['host1', 'host2'], count=2) + r = CephadmServe(cephadm_module)._apply_service( + RGWSpec(service_id="foo", placement=ps)) + assert r + + assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1') + assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2') + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm( + json.dumps([ + dict( + name='rgw.myrgw.myhost.myid', + style='cephadm', + fsid='fsid', + container_id='container_id', + version='version', + state='running', + ) + ]) + )) + def test_remove_daemon(self, cephadm_module): + with with_host(cephadm_module, 'test'): + CephadmServe(cephadm_module)._refresh_host_daemons('test') + c = cephadm_module.list_daemons() + wait(cephadm_module, c) + c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid']) + out = wait(cephadm_module, c) + assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'host1'): + with with_host(cephadm_module, 'host2'): + with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1: # type: DaemonDescription + with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2: # type: DaemonDescription + CephadmServe(cephadm_module)._check_for_moved_osds() + # both are in status "starting" + assert len(cephadm_module.cache.get_daemons()) == 2 + + dd1.status = DaemonDescriptionStatus.running + dd2.status = DaemonDescriptionStatus.error + cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1}) + cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2}) + CephadmServe(cephadm_module)._check_for_moved_osds() + assert len(cephadm_module.cache.get_daemons()) == 1 + + assert cephadm_module.events.get_for_daemon('osd.1') == [ + OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO', + "Deployed osd.1 on host 'host1'"), + OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO', + "Deployed osd.1 on host 'host2'"), + OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO', + "Removed duplicated daemon on host 'host2'"), + ] + + with pytest.raises(AssertionError): + cephadm_module.assert_issued_mon_command({ + 'prefix': 'auth rm', + 'entity': 'osd.1', + }) + + cephadm_module.assert_issued_mon_command({ + 'prefix': 'auth rm', + 'entity': 'osd.1', + }) + + @pytest.mark.parametrize( + "spec", + [ + ServiceSpec('crash'), + ServiceSpec('prometheus'), + ServiceSpec('grafana'), + ServiceSpec('node-exporter'), + ServiceSpec('alertmanager'), + ServiceSpec('rbd-mirror'), + ServiceSpec('cephfs-mirror'), + ServiceSpec('mds', service_id='fsname'), + 
RGWSpec(rgw_realm='realm', rgw_zone='zone'), + RGWSpec(service_id="foo"), + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_daemon_add(self, spec: ServiceSpec, cephadm_module): + unmanaged_spec = ServiceSpec.from_json(spec.to_json()) + unmanaged_spec.unmanaged = True + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, unmanaged_spec): + with with_daemon(cephadm_module, spec, 'test'): + pass + + @pytest.mark.parametrize( + "entity,success,spec", + [ + ('mgr.x', True, ServiceSpec( + service_type='mgr', + placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1), + unmanaged=True) + ), # noqa: E124 + ('client.rgw.x', True, ServiceSpec( + service_type='rgw', + service_id='id', + placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1), + unmanaged=True) + ), # noqa: E124 + ('client.nfs.x', True, ServiceSpec( + service_type='nfs', + service_id='id', + placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1), + unmanaged=True) + ), # noqa: E124 + ('mon.', False, ServiceSpec( + service_type='mon', + placement=PlacementSpec( + hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1), + unmanaged=True) + ), # noqa: E124 + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock()) + def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.side_effect = OrchestratorError('fail') + with pytest.raises(OrchestratorError): + wait(cephadm_module, cephadm_module.add_daemon(spec)) + if success: + cephadm_module.assert_issued_mon_command({ + 'prefix': 'auth rm', + 'entity': entity, + }) + else: + with pytest.raises(AssertionError): + cephadm_module.assert_issued_mon_command({ + 'prefix': 'auth rm', + 'entity': entity, + }) + assert cephadm_module.events.get_for_service(spec.service_name()) == [ + OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO', + "service was created"), + OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR', + "fail"), + ] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + _run_cephadm.side_effect = OrchestratorError('fail') + ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1) + r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps)) + assert not r + assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None + assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1 + assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[ + 'CEPHADM_DAEMON_PLACE_FAIL']['summary'] + assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[ + 'CEPHADM_DAEMON_PLACE_FAIL']['detail'] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with 
with_host(cephadm_module, 'test'): + CephadmServe(cephadm_module)._apply_all_services() + ps = PlacementSpec(hosts=['fail'], count=1) + r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps)) + assert not r + assert cephadm_module.apply_spec_fails + assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None + assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1 + assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[ + 'CEPHADM_APPLY_SPEC_FAIL']['summary'] + + @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option") + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.module.HostCache.save_host_devices") + def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator): + _save_devs.return_value = None + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1) + get_foreign_ceph_option.side_effect = KeyError + CephadmServe(cephadm_module)._apply_service_config( + ServiceSpec('mgr', placement=ps, config={'test': 'foo'})) + assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None + assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1 + assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[ + 'CEPHADM_INVALID_CONFIG_OPTION']['summary'] + assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[ + 'CEPHADM_INVALID_CONFIG_OPTION']['detail'] + + @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option") + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.module.CephadmOrchestrator.set_store") + def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + entry_size = 65536 # default 64k size + _get_foreign_ceph_option.return_value = entry_size + + class FakeDev(): + def __init__(self, c: str = 'a'): + # using 1015 here makes the serialized string exactly 1024 bytes if c is one char + self.content = {c: c * 1015} + self.path = 'dev/vdc' + + def to_json(self): + return self.content + + def from_json(self, stuff): + return json.loads(stuff) + + def byte_len(s): + return len(s.encode('utf-8')) + + with with_host(cephadm_module, 'test'): + fake_devices = [FakeDev()] * 100 # should be ~100k + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 34]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 32]})), + ] + _set_store.assert_has_calls(expected_calls) + + fake_devices = [FakeDev()] * 300 # should be ~300k + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4 + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5 + cephadm_module.cache.update_host_devices('test', 
fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.3', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.4', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.5', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + ] + _set_store.assert_has_calls(expected_calls) + + fake_devices = [FakeDev()] * 62 # should be ~62k, just under cache size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})), + ] + _set_store.assert_has_calls(expected_calls) + + # should be ~64k but just over so it requires more entries + fake_devices = [FakeDev()] * 64 + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 22]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 20]})), + ] + _set_store.assert_has_calls(expected_calls) + + # test for actual content being correct using differing devices + entry_size = 3072 + _get_foreign_ceph_option.return_value = entry_size + fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')] + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('e')]]})), + ] + _set_store.assert_has_calls(expected_calls) + + @mock.patch("cephadm.module.CephadmOrchestrator.get_store") + def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator): + def _fake_store(key): + if key == 'host.test.devices.0': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3}) + elif key == 'host.test.devices.1': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]}) + elif key == 'host.test.devices.2': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]}) + else: + raise Exception(f'Get store with unexpected value {key}') + + 
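+
+ # [editorial aside] _fake_store above mirrors the sharded layout that
+ # save_host_devices writes when a device list exceeds one KV entry: key
+ # .devices.0 carries an 'entries' count, later keys carry only 'devices'.
+ # A minimal reader for that layout (a sketch under that assumption, not the
+ # cache's actual implementation; defined here but never called):
+ def _sharded_load_sketch(get_store, host):
+ first = json.loads(get_store(f'host.{host}.devices.0'))
+ devs = list(first['devices'])
+ for i in range(1, first.get('entries', 1)):
+ devs += json.loads(get_store(f'host.{host}.devices.{i}'))['devices']
+ return devs
+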
_get_store.side_effect = _fake_store + devs = cephadm_module.cache.load_host_devices('test') + assert devs == [Device('/path')] * 20 + + @mock.patch("cephadm.module.Inventory.__contains__") + def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator): + def _fake_inv(key): + if key in ['host1', 'node02', 'host.something.com']: + return True + return False + + _contains.side_effect = _fake_inv + assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.something.com') == HostCacheStatus.host + assert cephadm_module.cache._get_host_cache_entry_status( + 'node02.devices.37') == HostCacheStatus.devices + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.something.com.devices.0') == HostCacheStatus.devices + assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.nothing.com') == HostCacheStatus.stray + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock()) + def test_nfs(self, cephadm_module): + with with_host(cephadm_module, 'test'): + ps = PlacementSpec(hosts=['test'], count=1) + spec = NFSServiceSpec( + service_id='name', + placement=ps) + unmanaged_spec = ServiceSpec.from_json(spec.to_json()) + unmanaged_spec.unmanaged = True + with with_service(cephadm_module, unmanaged_spec): + c = cephadm_module.add_daemon(spec) + [out] = wait(cephadm_module, c) + match_glob(out, "Deployed nfs.name.* on host 'test'") + + assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test') + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("subprocess.run", None) + @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock()) + @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4') + def test_iscsi(self, cephadm_module): + with with_host(cephadm_module, 'test'): + ps = PlacementSpec(hosts=['test'], count=1) + spec = IscsiServiceSpec( + service_id='name', + pool='pool', + api_user='user', + api_password='password', + placement=ps) + unmanaged_spec = ServiceSpec.from_json(spec.to_json()) + unmanaged_spec.unmanaged = True + with with_service(cephadm_module, unmanaged_spec): + + c = cephadm_module.add_daemon(spec) + [out] = wait(cephadm_module, c) + match_glob(out, "Deployed iscsi.name.* on host 'test'") + + assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test') + + @pytest.mark.parametrize( + "on_bool", + [ + True, + False + ] + ) + @pytest.mark.parametrize( + "fault_ident", + [ + 'fault', + 'ident' + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')]) + on_off = 'on' if on_bool else 'off' + assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}'] + _run_cephadm.assert_called_with('test', 'osd', 'shell', [ + '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True) + + 
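+
+ # [editorial aside] The assertion above pins the built-in behaviour: the
+ # default command is composed around lsmcli. The two tests that follow show
+ # the override hooks: the 'blink_device_light_cmd' store key (cluster-wide)
+ # and '<hostname>/blink_device_light_cmd' (per host), treated as a
+ # Jinja2-style template with ident_fault, on, path and dev in scope. A
+ # self-contained rendering sketch (assumes plain jinja2 semantics only, not
+ # the module's actual template helper; never called):
+ def _render_blink_cmd_sketch():
+ from jinja2 import Template
+ tpl = Template("xyz --foo --{{ ident_fault }}={{ 'on' if on else 'off' }} '{{ path or dev }}'")
+ # an empty path falls back to the device id, as in the per-host test below
+ assert tpl.render(ident_fault='fault', on=True, path='', dev='sda') == "xyz --foo --fault=on 'sda'"
+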
@mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_blink_device_light_custom(self, _run_cephadm, cephadm_module): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + cephadm_module.set_store('blink_device_light_cmd', 'echo hello') + c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')]) + assert wait(cephadm_module, c) == ['Set ident light for test: on'] + _run_cephadm.assert_called_with('test', 'osd', 'shell', [ + '--', 'echo', 'hello'], error_ok=True) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'mgr0'): + cephadm_module.set_store('mgr0/blink_device_light_cmd', + 'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'') + c = cephadm_module.blink_device_light( + 'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')]) + assert wait(cephadm_module, c) == [ + 'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on'] + _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [ + '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784' + ], error_ok=True) + + @pytest.mark.parametrize( + "spec, meth", + [ + (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr), + (ServiceSpec('crash'), CephadmOrchestrator.apply_crash), + (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus), + (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana), + (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter), + (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager), + (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror), + (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror), + (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds), + (ServiceSpec( + 'mds', service_id='fsname', + placement=PlacementSpec( + hosts=[HostPlacementSpec( + hostname='test', + name='fsname', + network='' + )] + ) + ), CephadmOrchestrator.apply_mds), + (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw), + (RGWSpec( + service_id='bar', + rgw_realm='realm', rgw_zone='zone', + placement=PlacementSpec( + hosts=[HostPlacementSpec( + hostname='test', + name='bar', + network='' + )] + ) + ), CephadmOrchestrator.apply_rgw), + (NFSServiceSpec( + service_id='name', + ), CephadmOrchestrator.apply_nfs), + (IscsiServiceSpec( + service_id='name', + pool='pool', + api_user='user', + api_password='password' + ), CephadmOrchestrator.apply_iscsi), + (CustomContainerSpec( + service_id='hello-world', + image='docker.io/library/hello-world:latest', + uid=65534, + gid=65534, + dirs=['foo/bar'], + files={ + 'foo/bar/xyz.conf': 'aaa\nbbb' + }, + bind_mounts=[[ + 'type=bind', + 'source=lib/modules', + 'destination=/lib/modules', + 'ro=true' + ]], + volume_mounts={ + 'foo/bar': '/foo/bar:Z' + }, + args=['--no-healthcheck'], + envs=['SECRET=password'], + ports=[8080, 8443] + ), CephadmOrchestrator.apply_container), + ] + ) + @mock.patch("subprocess.run", None) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock()) + @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock()) + 
@mock.patch("subprocess.run", mock.MagicMock())
+ def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
+ with with_host(cephadm_module, 'test'):
+ with with_service(cephadm_module, spec, meth, 'test'):
+ pass
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator):
+ spec = MDSSpec('mds', service_id='fsname', config={'test': 'foo'})
+ with with_host(cephadm_module, 'test'):
+ with with_service(cephadm_module, spec, host='test'):
+ ret, out, err = cephadm_module.check_mon_command({
+ 'prefix': 'config get',
+ 'who': spec.service_name(),
+ 'key': 'mds_join_fs',
+ })
+ assert out == 'fsname'
+ ret, out, err = cephadm_module.check_mon_command({
+ 'prefix': 'config get',
+ 'who': spec.service_name(),
+ 'key': 'mds_join_fs',
+ })
+ assert not out
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
+ def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
+ spec = MDSSpec(
+ 'mds',
+ service_id='fsname',
+ placement=PlacementSpec(hosts=['host1', 'host2']),
+ config={'test': 'foo'}
+ )
+ with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
+ c = cephadm_module.apply_mds(spec)
+ out = wait(cephadm_module, c)
+ match_glob(out, "Scheduled mds.fsname update...")
+ CephadmServe(cephadm_module)._apply_all_services()
+
+ [daemon] = cephadm_module.cache.daemons['host1'].keys()
+
+ spec.placement.set_hosts(['host2'])
+
+ ok_to_stop.side_effect = False
+
+ c = cephadm_module.apply_mds(spec)
+ out = wait(cephadm_module, c)
+ match_glob(out, "Scheduled mds.fsname update...")
+ CephadmServe(cephadm_module)._apply_all_services()
+
+ ok_to_stop.assert_called_with([daemon[4:]], force=True)
+
+ assert_rm_daemon(cephadm_module, spec.service_name(), 'host1') # verifies ok-to-stop
+ assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+ def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module):
+ # test daemons on offline/maint hosts not removed when applying specs
+ # test daemons not added to hosts in maint/offline state
+ with with_host(cephadm_module, 'test1'):
+ with with_host(cephadm_module, 'test2'):
+ with with_host(cephadm_module, 'test3'):
+ with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))):
+ # should get a mgr on all 3 hosts
+ # CephadmServe(cephadm_module)._apply_all_services()
+ assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
+
+ # put one host in offline state and one host in maintenance state
+ cephadm_module.offline_hosts = {'test2'}
+ cephadm_module.inventory._inventory['test3']['status'] = 'maintenance'
+ cephadm_module.inventory.save()
+
+ # hosts in offline/maint mode stay schedulable (so their daemons
+ # are not removed) but are reported unreachable, which keeps new
+ # daemons from being placed on them
+ assert cephadm_module.cache.is_host_schedulable('test2')
+ assert cephadm_module.cache.is_host_schedulable('test3')
+
+ assert cephadm_module.cache.is_host_unreachable('test2')
+ assert cephadm_module.cache.is_host_unreachable('test3')
+
+ with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))):
+ # re-apply services.
No mgr should be removed from maint/offline hosts + # crash daemon should only be on host not in maint/offline mode + CephadmServe(cephadm_module)._apply_all_services() + assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3 + assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1 + + cephadm_module.offline_hosts = {} + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop") + @mock.patch("cephadm.module.HostCache.get_daemon_types") + @mock.patch("cephadm.module.HostCache.get_hosts") + def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator): + hostname = 'host1' + _run_cephadm.side_effect = async_side_effect( + ([''], ['something\nsuccess - systemd target xxx disabled'], 0)) + _host_ok.return_value = 0, 'it is okay' + _get_daemon_types.return_value = ['crash'] + _hosts.return_value = [hostname, 'other_host'] + cephadm_module.inventory.add_host(HostSpec(hostname)) + # should not raise an error + retval = cephadm_module.enter_host_maintenance(hostname) + assert retval.result_str().startswith('Daemons for Ceph cluster') + assert not retval.exception_str + assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance' + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop") + @mock.patch("cephadm.module.HostCache.get_daemon_types") + @mock.patch("cephadm.module.HostCache.get_hosts") + def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator): + hostname = 'host1' + _run_cephadm.side_effect = async_side_effect( + ([''], ['something\nfailed - disable the target'], 0)) + _host_ok.return_value = 0, 'it is okay' + _get_daemon_types.return_value = ['crash'] + _hosts.return_value = [hostname, 'other_host'] + cephadm_module.inventory.add_host(HostSpec(hostname)) + + with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'): + cephadm_module.enter_host_maintenance(hostname) + + assert not cephadm_module.inventory._inventory[hostname]['status'] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop") + @mock.patch("cephadm.module.HostCache.get_daemon_types") + @mock.patch("cephadm.module.HostCache.get_hosts") + def test_maintenance_enter_i_really_mean_it(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator): + hostname = 'host1' + err_str = 'some kind of error' + _run_cephadm.side_effect = async_side_effect( + ([''], ['something\nfailed - disable the target'], 0)) + _host_ok.return_value = 1, err_str + _get_daemon_types.return_value = ['mon'] + _hosts.return_value = [hostname, 'other_host'] + cephadm_module.inventory.add_host(HostSpec(hostname)) + + with pytest.raises(OrchestratorError, match=err_str): + cephadm_module.enter_host_maintenance(hostname) + assert not cephadm_module.inventory._inventory[hostname]['status'] + + with pytest.raises(OrchestratorError, match=err_str): + cephadm_module.enter_host_maintenance(hostname, force=True) + assert not cephadm_module.inventory._inventory[hostname]['status'] + + retval = cephadm_module.enter_host_maintenance(hostname, force=True, yes_i_really_mean_it=True) + assert retval.result_str().startswith('Daemons for Ceph cluster') + assert not retval.exception_str + assert 
cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance' + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.module.HostCache.get_daemon_types") + @mock.patch("cephadm.module.HostCache.get_hosts") + def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator): + hostname = 'host1' + _run_cephadm.side_effect = async_side_effect(([''], [ + 'something\nsuccess - systemd target xxx enabled and started'], 0)) + _get_daemon_types.return_value = ['crash'] + _hosts.return_value = [hostname, 'other_host'] + cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance')) + # should not raise an error + retval = cephadm_module.exit_host_maintenance(hostname) + assert retval.result_str().startswith('Ceph cluster') + assert not retval.exception_str + assert not cephadm_module.inventory._inventory[hostname]['status'] + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.module.HostCache.get_daemon_types") + @mock.patch("cephadm.module.HostCache.get_hosts") + def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator): + hostname = 'host1' + _run_cephadm.side_effect = async_side_effect( + ([''], ['something\nfailed - unable to enable the target'], 0)) + _get_daemon_types.return_value = ['crash'] + _hosts.return_value = [hostname, 'other_host'] + cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance')) + + with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'): + cephadm_module.exit_host_maintenance(hostname) + + assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance' + + @mock.patch("cephadm.ssh.SSHManager._remote_connection") + @mock.patch("cephadm.ssh.SSHManager._execute_command") + @mock.patch("cephadm.ssh.SSHManager._check_execute_command") + @mock.patch("cephadm.ssh.SSHManager._write_remote_file") + def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module): + _write_file.side_effect = async_side_effect(None) + check_execute_command.side_effect = async_side_effect('') + execute_command.side_effect = async_side_effect(('{}', '', 0)) + remote_connection.side_effect = async_side_effect(mock.Mock()) + + assert cephadm_module.manage_etc_ceph_ceph_conf is False + + with with_host(cephadm_module, 'test'): + assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test') + + with with_host(cephadm_module, 'test'): + cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True) + cephadm_module.config_notify() + assert cephadm_module.manage_etc_ceph_ceph_conf is True + + CephadmServe(cephadm_module)._write_all_client_files() + # Make sure both ceph conf locations (default and per fsid) are called + _write_file.assert_has_calls([mock.call('test', '/etc/ceph/ceph.conf', b'', + 0o644, 0, 0, None), + mock.call('test', '/var/lib/ceph/fsid/config/ceph.conf', b'', + 0o644, 0, 0, None)] + ) + ceph_conf_files = cephadm_module.cache.get_host_client_files('test') + assert len(ceph_conf_files) == 2 + assert '/etc/ceph/ceph.conf' in ceph_conf_files + assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files + + # set extra config and expect that we deploy another ceph.conf + cephadm_module._set_extra_ceph_conf('[mon]\nk=v') + CephadmServe(cephadm_module)._write_all_client_files() + _write_file.assert_has_calls([mock.call('test', + 
'/etc/ceph/ceph.conf', + b'[mon]\nk=v\n', 0o644, 0, 0, None), + mock.call('test', + '/var/lib/ceph/fsid/config/ceph.conf', + b'[mon]\nk=v\n', 0o644, 0, 0, None)]) + # reload + cephadm_module.cache.last_client_files = {} + cephadm_module.cache.load() + + ceph_conf_files = cephadm_module.cache.get_host_client_files('test') + assert len(ceph_conf_files) == 2 + assert '/etc/ceph/ceph.conf' in ceph_conf_files + assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files + + # Make sure, _check_daemons does a redeploy due to monmap change: + f1_before_digest = cephadm_module.cache.get_host_client_files('test')[ + '/etc/ceph/ceph.conf'][0] + f2_before_digest = cephadm_module.cache.get_host_client_files( + 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0] + cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2') + CephadmServe(cephadm_module)._write_all_client_files() + f1_after_digest = cephadm_module.cache.get_host_client_files('test')[ + '/etc/ceph/ceph.conf'][0] + f2_after_digest = cephadm_module.cache.get_host_client_files( + 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0] + assert f1_before_digest != f1_after_digest + assert f2_before_digest != f2_after_digest + + @mock.patch("cephadm.inventory.HostCache.get_host_client_files") + def test_dont_write_client_files_to_unreachable_hosts(self, _get_client_files, cephadm_module): + cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1')) # online + cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2')) # maintenance + cephadm_module.inventory.add_host(HostSpec('host3', '1.2.3.3')) # offline + + # mark host2 as maintenance and host3 as offline + cephadm_module.inventory._inventory['host2']['status'] = 'maintenance' + cephadm_module.offline_hosts.add('host3') + + # verify host2 and host3 are correctly marked as unreachable but host1 is not + assert not cephadm_module.cache.is_host_unreachable('host1') + assert cephadm_module.cache.is_host_unreachable('host2') + assert cephadm_module.cache.is_host_unreachable('host3') + + _get_client_files.side_effect = Exception('Called _get_client_files') + + # with the online host, should call _get_client_files which + # we have setup to raise an Exception + with pytest.raises(Exception, match='Called _get_client_files'): + CephadmServe(cephadm_module)._write_client_files({}, 'host1') + + # for the maintenance and offline host, _get_client_files should + # not be called and it should just return immediately with nothing + # having been raised + CephadmServe(cephadm_module)._write_client_files({}, 'host2') + CephadmServe(cephadm_module)._write_client_files({}, 'host3') + + def test_etc_ceph_init(self): + with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m: + assert m.manage_etc_ceph_ceph_conf is True + + @mock.patch("cephadm.CephadmOrchestrator.check_mon_command") + @mock.patch("cephadm.CephadmOrchestrator.extra_ceph_conf") + def test_extra_ceph_conf(self, _extra_ceph_conf, _check_mon_cmd, cephadm_module: CephadmOrchestrator): + # settings put into the [global] section in the extra conf + # need to be appended to existing [global] section in given + # minimal ceph conf, but anything in another section (e.g. 
[mon]) + # needs to continue to be its own section + + # this is the conf "ceph generate-minimal-conf" will return in this test + _check_mon_cmd.return_value = (0, """[global] +global_k1 = global_v1 +global_k2 = global_v2 +[mon] +mon_k1 = mon_v1 +[osd] +osd_k1 = osd_v1 +osd_k2 = osd_v2 +""", '') + + # test with extra ceph conf that has some of the sections from minimal conf + _extra_ceph_conf.return_value = CephadmOrchestrator.ExtraCephConf(conf="""[mon] +mon_k2 = mon_v2 +[global] +global_k3 = global_v3 +""", last_modified=datetime_now()) + + expected_combined_conf = """[global] +global_k1 = global_v1 +global_k2 = global_v2 +global_k3 = global_v3 + +[mon] +mon_k1 = mon_v1 +mon_k2 = mon_v2 + +[osd] +osd_k1 = osd_v1 +osd_k2 = osd_v2 +""" + + assert cephadm_module.get_minimal_ceph_conf() == expected_combined_conf + + def test_client_keyrings_special_host_labels(self, cephadm_module): + cephadm_module.inventory.add_host(HostSpec('host1', labels=['keyring1'])) + cephadm_module.inventory.add_host(HostSpec('host2', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS])) + cephadm_module.inventory.add_host(HostSpec('host3', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS, SpecialHostLabels.DRAIN_CONF_KEYRING])) + # hosts need to be marked as having had refresh to be available for placement + # so "refresh" with empty daemon list + cephadm_module.cache.update_host_daemons('host1', {}) + cephadm_module.cache.update_host_daemons('host2', {}) + cephadm_module.cache.update_host_daemons('host3', {}) + + assert 'host1' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + assert 'host2' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + assert 'host3' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + + assert 'host1' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + assert 'host2' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + assert 'host3' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + + cephadm_module.keys.update(ClientKeyringSpec('keyring1', PlacementSpec(label='keyring1'))) + + with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd: + _mon_cmd.return_value = (0, 'real-keyring', '') + client_files = CephadmServe(cephadm_module)._calc_client_files() + assert 'host1' in client_files.keys() + assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host1'].keys() + assert 'host2' in client_files.keys() + assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host2'].keys() + assert 'host3' not in client_files.keys() + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + def check_registry_credentials(url, username, password): + assert json.loads(cephadm_module.get_store('registry_credentials')) == { + 'url': url, 'username': username, 'password': password} + + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + # test successful login with valid args + code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password') + assert out == 'registry login scheduled' + assert err == '' + check_registry_credentials('test-url', 'test-user', 'test-password') + + # test bad login attempt with invalid args + code, out, err = cephadm_module.registry_login('bad-args') + assert err == ("Invalid arguments. 
Please provide arguments <url> <username> <password> "
+ "or -i <login credentials json file>")
+ check_registry_credentials('test-url', 'test-user', 'test-password')
+
+ # test bad login using invalid json file
+ code, out, err = cephadm_module.registry_login(
+ None, None, None, '{"bad-json": "bad-json"}')
+ assert err == ("json provided for custom registry login did not include all necessary fields. "
+ "Please setup json file as\n"
+ "{\n"
+ " \"url\": \"REGISTRY_URL\",\n"
+ " \"username\": \"REGISTRY_USERNAME\",\n"
+ " \"password\": \"REGISTRY_PASSWORD\"\n"
+ "}\n")
+ check_registry_credentials('test-url', 'test-user', 'test-password')
+
+ # test good login using valid json file
+ good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
+ " \"password\": \"" + "json-pass" + "\"}")
+ code, out, err = cephadm_module.registry_login(None, None, None, good_json)
+ assert out == 'registry login scheduled'
+ assert err == ''
+ check_registry_credentials('json-url', 'json-user', 'json-pass')
+
+ # test bad login where args are valid but login command fails
+ _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1))
+ code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
+ assert err == 'Host test failed to login to fail-url as fail-user with given password'
+ check_registry_credentials('json-url', 'json-user', 'json-pass')
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
+ 'image_id': 'image_id',
+ 'repo_digests': ['image@repo_digest'],
+ })))
+ @pytest.mark.parametrize("use_repo_digest",
+ [
+ False,
+ True
+ ])
+ def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
+ cephadm_module.use_repo_digest = use_repo_digest
+
+ with with_host(cephadm_module, 'test', refresh_hosts=False):
+ cephadm_module.set_container_image('global', 'image')
+
+ if use_repo_digest:
+
+ CephadmServe(cephadm_module).convert_tags_to_repo_digest()
+
+ _, image, _ = cephadm_module.check_mon_command({
+ 'prefix': 'config get',
+ 'who': 'global',
+ 'key': 'container_image',
+ })
+ if use_repo_digest:
+ assert image == 'image@repo_digest'
+ else:
+ assert image == 'image'
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch
+Traceback (most recent call last):
+ File "<stdin>", line 6112, in <module>
+ File "<stdin>", line 1299, in _infer_fsid
+ File "<stdin>", line 1382, in _infer_image
+ File "<stdin>", line 3612, in command_ceph_volume
+ File "<stdin>", line 1061, in call_throws"""
+
+ with with_host(cephadm_module, 'test'):
+ _run_cephadm.reset_mock()
+ _run_cephadm.side_effect = OrchestratorError(error_message)
+
+ s = CephadmServe(cephadm_module)._refresh_host_devices('test')
+ assert s == 'host test `cephadm ceph-volume` failed: ' + error_message
+
+ assert _run_cephadm.mock_calls == [
+ mock.call('test', 'osd', 'ceph-volume',
+ ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
+ no_fsid=False, error_ok=False, log_output=False),
+ mock.call('test', 'osd', 'ceph-volume',
+ ['--', 'inventory', '--format=json-pretty'], image='',
+ no_fsid=False, error_ok=False, log_output=False),
+ ]
+
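+
+ # [editorial aside] The mock_calls assertion above pins down a fallback
+ # protocol: inventory is first requested with --filter-for-batch, and when
+ # the installed ceph-volume is too old to know the flag (the 'unrecognized
+ # arguments' error above), it is retried once without it. A hedged sketch of
+ # that shape (run_inventory is hypothetical, not the serve loop's real API):
+ def _inventory_fallback_sketch(run_inventory, host):
+ try:
+ return run_inventory(host, filter_for_batch=True)
+ except OrchestratorError as e:
+ if 'unrecognized arguments: --filter-for-batch' not in str(e):
+ raise
+ return run_inventory(host, filter_for_batch=False)
+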
@mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test', refresh_hosts=False): + with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1): + pass + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test', refresh_hosts=False): + cephadm_module.mock_store_set('_ceph_get', 'osd_map', { + 'osds': [ + { + 'osd': 1, + 'up_from': 0, + 'uuid': 'uuid' + } + ] + }) + + ceph_volume_lvm_list = { + '1': [{ + 'tags': { + 'ceph.cluster_fsid': cephadm_module._cluster_fsid, + 'ceph.osd_fsid': 'uuid' + }, + 'type': 'data' + }] + } + _run_cephadm.reset_mock(return_value=True, side_effect=True) + + async def _r_c(*args, **kwargs): + if 'ceph-volume' in args: + return (json.dumps(ceph_volume_lvm_list), '', 0) + else: + assert ['_orch', 'deploy'] in args + raise OrchestratorError("let's fail somehow") + _run_cephadm.side_effect = _r_c + assert cephadm_module._osd_activate( + ['test']).stderr == "let's fail somehow" + with pytest.raises(AssertionError): + cephadm_module.assert_issued_mon_command({ + 'prefix': 'auth rm', + 'entity': 'osd.1', + }) + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test', refresh_hosts=False): + + async def _ceph_volume_list(s, host, entity, cmd, **kwargs): + logging.info(f'ceph-volume cmd: {cmd}') + if 'raw' in cmd: + return json.dumps({ + "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": { + "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8", + "device": "/dev/loop0", + "osd_id": 21, + "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5", + "type": "bluestore" + }, + }), '', 0 + if 'lvm' in cmd: + return json.dumps({ + '1': [{ + 'tags': { + 'ceph.cluster_fsid': cephadm_module._cluster_fsid, + 'ceph.osd_fsid': 'uuid' + }, + 'type': 'data' + }, { + 'tags': { + 'ceph.cluster_fsid': cephadm_module._cluster_fsid, + 'ceph.osd_fsid': 'uuid' + }, + 'type': 'db' + }] + }), '', 0 + return '{}', '', 0 + + with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list): + pass + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True)) + with with_host(cephadm_module, 'test', refresh_hosts=False): + with with_service(cephadm_module, dg, host='test'): + with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1): + assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1 + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator): + with pytest.raises(OrchestratorError): + with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False): + cephadm_module.inventory.add_label('test', SpecialHostLabels.ADMIN) + pass + assert False + with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True): + with 
with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False): + cephadm_module.inventory.add_label('test2', SpecialHostLabels.ADMIN) + + @pytest.mark.parametrize("facts, settings, expected_value", + [ + # All options are available on all hosts + ( + { + "host1": + { + "sysctl_options": + { + 'opt1': 'val1', + 'opt2': 'val2', + } + }, + "host2": + { + "sysctl_options": + { + 'opt1': '', + 'opt2': '', + } + }, + }, + {'opt1', 'opt2'}, # settings + {'host1': [], 'host2': []} # expected_value + ), + # opt1 is missing on host 1, opt2 is missing on host2 + ({ + "host1": + { + "sysctl_options": + { + 'opt2': '', + 'optX': '', + } + }, + "host2": + { + "sysctl_options": + { + 'opt1': '', + 'opt3': '', + 'opt4': '', + } + }, + }, + {'opt1', 'opt2'}, # settings + {'host1': ['opt1'], 'host2': ['opt2']} # expected_value + ), + # All options are missing on all hosts + ({ + "host1": + { + "sysctl_options": + { + } + }, + "host2": + { + "sysctl_options": + { + } + }, + }, + {'opt1', 'opt2'}, # settings + {'host1': ['opt1', 'opt2'], 'host2': [ + 'opt1', 'opt2']} # expected_value + ), + ] + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_tuned_profiles_settings_validation(self, facts, settings, expected_value, cephadm_module): + with with_host(cephadm_module, 'test'): + spec = mock.Mock() + spec.settings = sorted(settings) + spec.placement.filter_matching_hostspecs = mock.Mock() + spec.placement.filter_matching_hostspecs.return_value = ['host1', 'host2'] + cephadm_module.cache.facts = facts + assert cephadm_module._validate_tunedprofile_settings(spec) == expected_value + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) + def test_tuned_profiles_validation(self, cephadm_module): + with with_host(cephadm_module, 'test'): + + with pytest.raises(OrchestratorError, match="^Invalid placement specification.+"): + spec = mock.Mock() + spec.settings = {'a': 'b'} + spec.placement = PlacementSpec(hosts=[]) + cephadm_module._validate_tuned_profile_spec(spec) + + with pytest.raises(OrchestratorError, match="Invalid spec: settings section cannot be empty."): + spec = mock.Mock() + spec.settings = {} + spec.placement = PlacementSpec(hosts=['host1', 'host2']) + cephadm_module._validate_tuned_profile_spec(spec) + + with pytest.raises(OrchestratorError, match="^Placement 'count' field is no supported .+"): + spec = mock.Mock() + spec.settings = {'a': 'b'} + spec.placement = PlacementSpec(count=1) + cephadm_module._validate_tuned_profile_spec(spec) + + with pytest.raises(OrchestratorError, match="^Placement 'count_per_host' field is no supported .+"): + spec = mock.Mock() + spec.settings = {'a': 'b'} + spec.placement = PlacementSpec(count_per_host=1, label='foo') + cephadm_module._validate_tuned_profile_spec(spec) + + with pytest.raises(OrchestratorError, match="^Found invalid host"): + spec = mock.Mock() + spec.settings = {'a': 'b'} + spec.placement = PlacementSpec(hosts=['host1', 'host2']) + cephadm_module.inventory = mock.Mock() + cephadm_module.inventory.all_specs = mock.Mock( + return_value=[mock.Mock().hostname, mock.Mock().hostname]) + cephadm_module._validate_tuned_profile_spec(spec) + + def test_set_unmanaged(self, cephadm_module): + cephadm_module.spec_store._specs['crash'] = ServiceSpec('crash', unmanaged=False) + assert not cephadm_module.spec_store._specs['crash'].unmanaged + cephadm_module.spec_store.set_unmanaged('crash', True) + assert cephadm_module.spec_store._specs['crash'].unmanaged + 
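+
+ # [editorial aside] 'unmanaged' is a per-service switch: while it is True the
+ # orchestrator keeps the spec but stops reconciling it, so no daemons are
+ # added or removed for that service; the toggle back below confirms the flag
+ # is reversible.
+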
cephadm_module.spec_store.set_unmanaged('crash', False) + assert not cephadm_module.spec_store._specs['crash'].unmanaged + + def test_inventory_known_hostnames(self, cephadm_module): + cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1')) + cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2')) + cephadm_module.inventory.add_host(HostSpec('host3.domain', '1.2.3.3')) + cephadm_module.inventory.add_host(HostSpec('host4.domain', '1.2.3.4')) + cephadm_module.inventory.add_host(HostSpec('host5', '1.2.3.5')) + + # update_known_hostname expects args to be + # as are gathered from cephadm gather-facts. Although, passing the + # names in the wrong order should actually have no effect on functionality + cephadm_module.inventory.update_known_hostnames('host1', 'host1', 'host1.domain') + cephadm_module.inventory.update_known_hostnames('host2.domain', 'host2', 'host2.domain') + cephadm_module.inventory.update_known_hostnames('host3', 'host3', 'host3.domain') + cephadm_module.inventory.update_known_hostnames('host4.domain', 'host4', 'host4.domain') + cephadm_module.inventory.update_known_hostnames('host5', 'host5', 'host5') + + assert 'host1' in cephadm_module.inventory + assert 'host1.domain' in cephadm_module.inventory + assert cephadm_module.inventory.get_addr('host1') == '1.2.3.1' + assert cephadm_module.inventory.get_addr('host1.domain') == '1.2.3.1' + + assert 'host2' in cephadm_module.inventory + assert 'host2.domain' in cephadm_module.inventory + assert cephadm_module.inventory.get_addr('host2') == '1.2.3.2' + assert cephadm_module.inventory.get_addr('host2.domain') == '1.2.3.2' + + assert 'host3' in cephadm_module.inventory + assert 'host3.domain' in cephadm_module.inventory + assert cephadm_module.inventory.get_addr('host3') == '1.2.3.3' + assert cephadm_module.inventory.get_addr('host3.domain') == '1.2.3.3' + + assert 'host4' in cephadm_module.inventory + assert 'host4.domain' in cephadm_module.inventory + assert cephadm_module.inventory.get_addr('host4') == '1.2.3.4' + assert cephadm_module.inventory.get_addr('host4.domain') == '1.2.3.4' + + assert 'host4.otherdomain' not in cephadm_module.inventory + with pytest.raises(OrchestratorError): + cephadm_module.inventory.get_addr('host4.otherdomain') + + assert 'host5' in cephadm_module.inventory + assert cephadm_module.inventory.get_addr('host5') == '1.2.3.5' + with pytest.raises(OrchestratorError): + cephadm_module.inventory.get_addr('host5.domain') + + def test_async_timeout_handler(self, cephadm_module): + cephadm_module.default_cephadm_command_timeout = 900 + + async def _timeout(): + raise asyncio.TimeoutError + + with pytest.raises(OrchestratorError, match=r'Command timed out \(default 900 second timeout\)'): + with cephadm_module.async_timeout_handler(): + cephadm_module.wait_async(_timeout()) + + with pytest.raises(OrchestratorError, match=r'Command timed out on host hostA \(default 900 second timeout\)'): + with cephadm_module.async_timeout_handler('hostA'): + cephadm_module.wait_async(_timeout()) + + with pytest.raises(OrchestratorError, match=r'Command "testing" timed out \(default 900 second timeout\)'): + with cephadm_module.async_timeout_handler(cmd='testing'): + cephadm_module.wait_async(_timeout()) + + with pytest.raises(OrchestratorError, match=r'Command "testing" timed out on host hostB \(default 900 second timeout\)'): + with cephadm_module.async_timeout_handler('hostB', 'testing'): + cephadm_module.wait_async(_timeout()) + + with pytest.raises(OrchestratorError, match=r'Command timed out \(non-default 111 
second timeout\)'): + with cephadm_module.async_timeout_handler(timeout=111): + cephadm_module.wait_async(_timeout()) + + with pytest.raises(OrchestratorError, match=r'Command "very slow" timed out on host hostC \(non-default 999 second timeout\)'): + with cephadm_module.async_timeout_handler('hostC', 'very slow', 999): + cephadm_module.wait_async(_timeout()) + + @mock.patch("cephadm.CephadmOrchestrator.remove_osds") + @mock.patch("cephadm.CephadmOrchestrator.add_host_label", lambda *a, **kw: None) + @mock.patch("cephadm.inventory.HostCache.get_daemons_by_host", lambda *a, **kw: []) + def test_host_drain_zap(self, _rm_osds, cephadm_module): + # pass force=true in these tests to bypass _admin label check + cephadm_module.drain_host('host1', force=True, zap_osd_devices=False) + assert _rm_osds.called_with([], zap=False) + + cephadm_module.drain_host('host1', force=True, zap_osd_devices=True) + assert _rm_osds.called_with([], zap=True) + + def test_process_ls_output(self, cephadm_module): + sample_ls_output = """[ + { + "style": "cephadm:v1", + "name": "mon.vm-00", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mon.vm-00", + "enabled": true, + "state": "running", + "service_name": "mon", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "b170b964a6e2918955362eb36195627c6086d3f859d4ebce2ee13f3ee4738733", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 56214159, + "cpu_percentage": "2.32%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:31:11.752300Z", + "created": "2023-09-22T22:15:24.121387Z", + "deployed": "2023-09-22T22:31:10.383431Z", + "configured": "2023-09-22T22:31:11.859440Z" + }, + { + "style": "cephadm:v1", + "name": "mgr.vm-00.mpexeg", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mgr.vm-00.mpexeg", + "enabled": true, + "state": "running", + "service_name": "mgr", + "ports": [ + 8443, + 9283, + 8765 + ], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "6e7756cef553a25a2a84227e8755d3d25046b9cd8758b23c698d34b3af895242", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 529740595, + "cpu_percentage": "8.35%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:30:18.587021Z", + "created": "2023-09-22T22:15:29.101409Z", + "deployed": "2023-09-22T22:30:17.339114Z", + "configured": 
"2023-09-22T22:30:18.758122Z" + }, + { + "style": "cephadm:v1", + "name": "agent.vm-00", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@agent.vm-00", + "enabled": true, + "state": "running", + "service_name": "agent", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "container_id": null, + "container_image_name": null, + "container_image_id": null, + "container_image_digests": null, + "version": null, + "started": null, + "created": "2023-09-22T22:33:34.708289Z", + "deployed": null, + "configured": "2023-09-22T22:33:34.722289Z" + }, + { + "style": "cephadm:v1", + "name": "osd.0", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@osd.0", + "enabled": true, + "state": "running", + "service_name": "osd.foo", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "93f71c60820b86901a45b3b1fe3dba3e3e677b37fd22310b7e7da3f67bb8ccd6", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 73410805, + "cpu_percentage": "6.54%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:41:29.019587Z", + "created": "2023-09-22T22:41:03.615080Z", + "deployed": "2023-09-22T22:41:24.965222Z", + "configured": "2023-09-22T22:41:29.119250Z" + } +]""" + + now = str_to_datetime('2023-09-22T22:45:29.119250Z') + cephadm_module._cluster_fsid = '588f83ba-5995-11ee-9e94-52540057a206' + with mock.patch("cephadm.module.datetime_now", lambda: now): + cephadm_module._process_ls_output('vm-00', json.loads(sample_ls_output)) + assert 'vm-00' in cephadm_module.cache.daemons + assert 'mon.vm-00' in cephadm_module.cache.daemons['vm-00'] + assert 'mgr.vm-00.mpexeg' in cephadm_module.cache.daemons['vm-00'] + assert 'agent.vm-00' in cephadm_module.cache.daemons['vm-00'] + assert 'osd.0' in cephadm_module.cache.daemons['vm-00'] + + daemons = cephadm_module.cache.get_daemons_by_host('vm-00') + c_img_ids = [dd.container_image_id for dd in daemons if dd.daemon_type != 'agent'] + assert all(c_img_id == '674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55' for c_img_id in c_img_ids) + last_refreshes = [dd.last_refresh for dd in daemons] + assert all(lrf == now for lrf in last_refreshes) + versions = [dd.version for dd in daemons if dd.daemon_type != 'agent'] + assert all(version == '18.0.0-5185-g7b3a4f2b' for version in versions) + + osd = cephadm_module.cache.get_daemons_by_type('osd', 'vm-00')[0] + assert osd.cpu_percentage == '6.54%' + assert osd.memory_usage == 73410805 + assert osd.created == str_to_datetime('2023-09-22T22:41:03.615080Z') diff --git a/src/pybind/mgr/cephadm/tests/test_completion.py b/src/pybind/mgr/cephadm/tests/test_completion.py new file mode 100644 index 000000000..327c12d2a --- 
/dev/null +++ b/src/pybind/mgr/cephadm/tests/test_completion.py @@ -0,0 +1,40 @@ +import pytest + +from ..module import forall_hosts + + +class TestCompletion(object): + + @pytest.mark.parametrize("input,expected", [ + ([], []), + ([1], ["(1,)"]), + (["hallo"], ["('hallo',)"]), + ("hi", ["('h',)", "('i',)"]), + (list(range(5)), [str((x, )) for x in range(5)]), + ([(1, 2), (3, 4)], ["(1, 2)", "(3, 4)"]), + ]) + def test_async_map(self, input, expected, cephadm_module): + @forall_hosts + def run_forall(*args): + return str(args) + assert run_forall(input) == expected + + @pytest.mark.parametrize("input,expected", [ + ([], []), + ([1], ["(1,)"]), + (["hallo"], ["('hallo',)"]), + ("hi", ["('h',)", "('i',)"]), + (list(range(5)), [str((x, )) for x in range(5)]), + ([(1, 2), (3, 4)], ["(1, 2)", "(3, 4)"]), + ]) + def test_async_map_self(self, input, expected, cephadm_module): + class Run(object): + def __init__(self): + self.attr = 1 + + @forall_hosts + def run_forall(self, *args): + assert self.attr == 1 + return str(args) + + assert Run().run_forall(input) == expected diff --git a/src/pybind/mgr/cephadm/tests/test_configchecks.py b/src/pybind/mgr/cephadm/tests/test_configchecks.py new file mode 100644 index 000000000..3cae0a27d --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_configchecks.py @@ -0,0 +1,668 @@ +import copy +import json +import logging +import ipaddress +import pytest +import uuid + +from time import time as now + +from ..configchecks import CephadmConfigChecks +from ..inventory import HostCache +from ..upgrade import CephadmUpgrade, UpgradeState +from orchestrator import DaemonDescription + +from typing import List, Dict, Any, Optional + +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + +host_sample = { + "arch": "x86_64", + "bios_date": "04/01/2014", + "bios_version": "F2", + "cpu_cores": 16, + "cpu_count": 2, + "cpu_load": { + "15min": 0.0, + "1min": 0.01, + "5min": 0.01 + }, + "cpu_model": "Intel® Xeon® Processor E5-2698 v3", + "cpu_threads": 64, + "flash_capacity": "4.0TB", + "flash_capacity_bytes": 4000797868032, + "flash_count": 2, + "flash_list": [ + { + "description": "ATA CT2000MX500SSD1 (2.0TB)", + "dev_name": "sda", + "disk_size_bytes": 2000398934016, + "model": "CT2000MX500SSD1", + "rev": "023", + "vendor": "ATA", + "wwid": "t10.ATA CT2000MX500SSD1 193023156DE0" + }, + { + "description": "ATA CT2000MX500SSD1 (2.0TB)", + "dev_name": "sdb", + "disk_size_bytes": 2000398934016, + "model": "CT2000MX500SSD1", + "rev": "023", + "vendor": "ATA", + "wwid": "t10.ATA CT2000MX500SSD1 193023156DE0" + }, + ], + "hdd_capacity": "16.0TB", + "hdd_capacity_bytes": 16003148120064, + "hdd_count": 4, + "hdd_list": [ + { + "description": "ST4000VN008-2DR1 (4.0TB)", + "dev_name": "sdc", + "disk_size_bytes": 4000787030016, + "model": "ST4000VN008-2DR1", + "rev": "SC60", + "vendor": "ATA", + "wwid": "t10.ATA ST4000VN008-2DR1 Z340EPBJ" + }, + { + "description": "ST4000VN008-2DR1 (4.0TB)", + "dev_name": "sdd", + "disk_size_bytes": 4000787030016, + "model": "ST4000VN008-2DR1", + "rev": "SC60", + "vendor": "ATA", + "wwid": "t10.ATA ST4000VN008-2DR1 Z340EPBJ" + }, + { + "description": "ST4000VN008-2DR1 (4.0TB)", + "dev_name": "sde", + "disk_size_bytes": 4000787030016, + "model": "ST4000VN008-2DR1", + "rev": "SC60", + "vendor": "ATA", + "wwid": "t10.ATA ST4000VN008-2DR1 Z340EPBJ" + }, + { + "description": "ST4000VN008-2DR1 (4.0TB)", + "dev_name": "sdf", + "disk_size_bytes": 4000787030016, + "model": "ST4000VN008-2DR1", + "rev": "SC60", + "vendor": "ATA", + "wwid": 
"t10.ATA ST4000VN008-2DR1 Z340EPBJ" + }, + ], + "hostname": "dummy", + "interfaces": { + "eth0": { + "driver": "e1000e", + "iftype": "physical", + "ipv4_address": "10.7.17.1/24", + "ipv6_address": "fe80::215:17ff:feab:50e2/64", + "lower_devs_list": [], + "mtu": 9000, + "nic_type": "ethernet", + "operstate": "up", + "speed": 1000, + "upper_devs_list": [], + }, + "eth1": { + "driver": "e1000e", + "iftype": "physical", + "ipv4_address": "10.7.18.1/24", + "ipv6_address": "fe80::215:17ff:feab:50e2/64", + "lower_devs_list": [], + "mtu": 9000, + "nic_type": "ethernet", + "operstate": "up", + "speed": 1000, + "upper_devs_list": [], + }, + "eth2": { + "driver": "r8169", + "iftype": "physical", + "ipv4_address": "10.7.19.1/24", + "ipv6_address": "fe80::76d4:35ff:fe58:9a79/64", + "lower_devs_list": [], + "mtu": 1500, + "nic_type": "ethernet", + "operstate": "up", + "speed": 1000, + "upper_devs_list": [] + }, + }, + "kernel": "4.18.0-240.10.1.el8_3.x86_64", + "kernel_parameters": { + "net.ipv4.ip_nonlocal_bind": "0", + }, + "kernel_security": { + "SELINUX": "enforcing", + "SELINUXTYPE": "targeted", + "description": "SELinux: Enabled(enforcing, targeted)", + "type": "SELinux" + }, + "memory_available_kb": 19489212, + "memory_free_kb": 245164, + "memory_total_kb": 32900916, + "model": "StorageHeavy", + "nic_count": 3, + "operating_system": "Red Hat Enterprise Linux 8.3 (Ootpa)", + "subscribed": "Yes", + "system_uptime": 777600.0, + "timestamp": now(), + "vendor": "Ceph Servers Inc", +} + + +def role_list(n: int) -> List[str]: + if n == 1: + return ['mon', 'mgr', 'osd'] + if n in [2, 3]: + return ['mon', 'mds', 'osd'] + + return ['osd'] + + +def generate_testdata(count: int = 10, public_network: str = '10.7.17.0/24', cluster_network: str = '10.7.18.0/24'): + # public network = eth0, cluster_network = eth1 + assert count > 3 + assert public_network + num_disks = host_sample['hdd_count'] + hosts = {} + daemons = {} + daemon_to_host = {} + osd_num = 0 + public_netmask = public_network.split('/')[1] + cluster_ip_list = [] + cluster_netmask = '' + + public_ip_list = [str(i) for i in list(ipaddress.ip_network(public_network).hosts())] + if cluster_network: + cluster_ip_list = [str(i) for i in list(ipaddress.ip_network(cluster_network).hosts())] + cluster_netmask = cluster_network.split('/')[1] + + for n in range(1, count + 1, 1): + + new_host = copy.deepcopy(host_sample) + hostname = f"node-{n}.ceph.com" + + new_host['hostname'] = hostname + new_host['interfaces']['eth0']['ipv4_address'] = f"{public_ip_list.pop(0)}/{public_netmask}" + if cluster_ip_list: + new_host['interfaces']['eth1']['ipv4_address'] = f"{cluster_ip_list.pop(0)}/{cluster_netmask}" + else: + new_host['interfaces']['eth1']['ipv4_address'] = '' + + hosts[hostname] = new_host + daemons[hostname] = {} + for r in role_list(n): + name = '' + if r == 'osd': + for n in range(num_disks): + osd = DaemonDescription( + hostname=hostname, daemon_type='osd', daemon_id=osd_num) + name = f"osd.{osd_num}" + daemons[hostname][name] = osd + daemon_to_host[name] = hostname + osd_num += 1 + else: + name = f"{r}.{hostname}" + daemons[hostname][name] = DaemonDescription( + hostname=hostname, daemon_type=r, daemon_id=hostname) + daemon_to_host[name] = hostname + + logger.debug(f"daemon to host lookup - {json.dumps(daemon_to_host)}") + return hosts, daemons, daemon_to_host + + +@pytest.fixture() +def mgr(): + """Provide a fake ceph mgr object preloaded with a configuration""" + mgr = FakeMgr() + mgr.cache.facts, mgr.cache.daemons, mgr.daemon_to_host = \ + 
generate_testdata(public_network='10.9.64.0/24', cluster_network='') + mgr.module_option.update({ + "config_checks_enabled": True, + }) + yield mgr + + +class FakeMgr: + + def __init__(self): + self.datastore = {} + self.module_option = {} + self.health_checks = {} + self.default_version = 'quincy' + self.version_overrides = {} + self.daemon_to_host = {} + + self.cache = HostCache(self) + self.upgrade = CephadmUpgrade(self) + + def set_health_checks(self, checks: dict): + return + + def get_module_option(self, keyname: str) -> Optional[str]: + return self.module_option.get(keyname, None) + + def set_module_option(self, keyname: str, value: str) -> None: + return None + + def get_store(self, keyname: str, default=None) -> Optional[str]: + return self.datastore.get(keyname, None) + + def set_store(self, keyname: str, value: str) -> None: + self.datastore[keyname] = value + return None + + def _ceph_get_server(self) -> None: + pass + + def get_metadata(self, daemon_type: str, daemon_id: str) -> Dict[str, Any]: + key = f"{daemon_type}.{daemon_id}" + if key in self.version_overrides: + logger.debug(f"override applied for {key}") + version_str = self.version_overrides[key] + else: + version_str = self.default_version + + return {"ceph_release": version_str, "hostname": self.daemon_to_host[key]} + + def list_servers(self) -> List[Dict[str, List[Dict[str, str]]]]: + num_disks = host_sample['hdd_count'] + osd_num = 0 + service_map = [] + + for hostname in self.cache.facts: + + host_num = int(hostname.split('.')[0].split('-')[1]) + svc_list = [] + for r in role_list(host_num): + if r == 'osd': + for _n in range(num_disks): + svc_list.append({ + "type": "osd", + "id": osd_num, + }) + osd_num += 1 + else: + svc_list.append({ + "type": r, + "id": hostname, + }) + + service_map.append({"services": svc_list}) + logger.debug(f"services map - {json.dumps(service_map)}") + return service_map + + def use_repo_digest(self) -> None: + return None + + +class TestConfigCheck: + + def test_to_json(self, mgr): + checker = CephadmConfigChecks(mgr) + out = checker.to_json() + assert out + assert len(out) == len(checker.health_checks) + + def test_lookup_check(self, mgr): + checker = CephadmConfigChecks(mgr) + check = checker.lookup_check('osd_mtu_size') + logger.debug(json.dumps(check.to_json())) + assert check + assert check.healthcheck_name == "CEPHADM_CHECK_MTU" + + def test_old_checks_removed(self, mgr): + mgr.datastore.update({ + "config_checks": '{"bogus_one": "enabled", "bogus_two": "enabled", ' + '"kernel_security": "enabled", "public_network": "enabled", ' + '"kernel_version": "enabled", "network_missing": "enabled", ' + '"osd_mtu_size": "enabled", "osd_linkspeed": "enabled", ' + '"os_subscription": "enabled", "ceph_release": "enabled"}' + }) + checker = CephadmConfigChecks(mgr) + raw = mgr.get_store('config_checks') + checks = json.loads(raw) + assert "bogus_one" not in checks + assert "bogus_two" not in checks + assert len(checks) == len(checker.health_checks) + + def test_new_checks(self, mgr): + mgr.datastore.update({ + "config_checks": '{"kernel_security": "enabled", "public_network": "enabled", ' + '"osd_mtu_size": "enabled", "osd_linkspeed": "enabled"}' + }) + checker = CephadmConfigChecks(mgr) + raw = mgr.get_store('config_checks') + checks = json.loads(raw) + assert len(checks) == len(checker.health_checks) + + def test_no_issues(self, mgr): + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + checker.run_checks() + + assert not 
mgr.health_checks + + def test_no_public_network(self, mgr): + bad_node = mgr.cache.facts['node-1.ceph.com'] + bad_node['interfaces']['eth0']['ipv4_address'] = "192.168.1.20/24" + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + checker.run_checks() + logger.debug(mgr.health_checks) + assert len(mgr.health_checks) == 1 + assert 'CEPHADM_CHECK_PUBLIC_MEMBERSHIP' in mgr.health_checks + assert mgr.health_checks['CEPHADM_CHECK_PUBLIC_MEMBERSHIP']['detail'][0] == \ + 'node-1.ceph.com does not have an interface on any public network' + + def test_missing_networks(self, mgr): + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.66.0/24'] + checker.run_checks() + + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert len(mgr.health_checks) == 1 + assert 'CEPHADM_CHECK_NETWORK_MISSING' in mgr.health_checks + assert mgr.health_checks['CEPHADM_CHECK_NETWORK_MISSING']['detail'][0] == \ + "10.9.66.0/24 not found on any host in the cluster" + + def test_bad_mtu_single(self, mgr): + + bad_node = mgr.cache.facts['node-1.ceph.com'] + bad_node['interfaces']['eth0']['mtu'] = 1500 + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert "CEPHADM_CHECK_MTU" in mgr.health_checks and len(mgr.health_checks) == 1 + assert mgr.health_checks['CEPHADM_CHECK_MTU']['detail'][0] == \ + 'host node-1.ceph.com(eth0) is using MTU 1500 on 10.9.64.0/24, NICs on other hosts use 9000' + + def test_bad_mtu_multiple(self, mgr): + + for n in [1, 5]: + bad_node = mgr.cache.facts[f'node-{n}.ceph.com'] + bad_node['interfaces']['eth0']['mtu'] = 1500 + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert "CEPHADM_CHECK_MTU" in mgr.health_checks and len(mgr.health_checks) == 1 + assert mgr.health_checks['CEPHADM_CHECK_MTU']['count'] == 2 + + def test_bad_linkspeed_single(self, mgr): + + bad_node = mgr.cache.facts['node-1.ceph.com'] + bad_node['interfaces']['eth0']['speed'] = 100 + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert mgr.health_checks + assert "CEPHADM_CHECK_LINKSPEED" in mgr.health_checks and len(mgr.health_checks) == 1 + assert mgr.health_checks['CEPHADM_CHECK_LINKSPEED']['detail'][0] == \ + 'host node-1.ceph.com(eth0) has linkspeed of 100 on 10.9.64.0/24, NICs on other hosts use 1000' + + def test_super_linkspeed_single(self, mgr): + + bad_node = mgr.cache.facts['node-1.ceph.com'] + bad_node['interfaces']['eth0']['speed'] = 10000 + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert not mgr.health_checks + + def test_release_mismatch_single(self, mgr): + + mgr.version_overrides = { + "osd.1": "pacific", + } + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = 
['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + assert mgr.health_checks + assert "CEPHADM_CHECK_CEPH_RELEASE" in mgr.health_checks and len(mgr.health_checks) == 1 + assert mgr.health_checks['CEPHADM_CHECK_CEPH_RELEASE']['detail'][0] == \ + 'osd.1 is running pacific (majority of cluster is using quincy)' + + def test_release_mismatch_multi(self, mgr): + + mgr.version_overrides = { + "osd.1": "pacific", + "osd.5": "octopus", + } + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + assert mgr.health_checks + assert "CEPHADM_CHECK_CEPH_RELEASE" in mgr.health_checks and len(mgr.health_checks) == 1 + assert len(mgr.health_checks['CEPHADM_CHECK_CEPH_RELEASE']['detail']) == 2 + + def test_kernel_mismatch(self, mgr): + + bad_host = mgr.cache.facts['node-1.ceph.com'] + bad_host['kernel'] = "5.10.18.0-241.10.1.el8.x86_64" + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + assert len(mgr.health_checks) == 1 + assert 'CEPHADM_CHECK_KERNEL_VERSION' in mgr.health_checks + assert mgr.health_checks['CEPHADM_CHECK_KERNEL_VERSION']['detail'][0] == \ + "host node-1.ceph.com running kernel 5.10, majority of hosts(9) running 4.18" + assert mgr.health_checks['CEPHADM_CHECK_KERNEL_VERSION']['count'] == 1 + + def test_inconsistent_subscription(self, mgr): + + bad_host = mgr.cache.facts['node-5.ceph.com'] + bad_host['subscribed'] = "no" + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + assert len(mgr.health_checks) == 1 + assert "CEPHADM_CHECK_SUBSCRIPTION" in mgr.health_checks + assert mgr.health_checks['CEPHADM_CHECK_SUBSCRIPTION']['detail'][0] == \ + "node-5.ceph.com does not have an active subscription" + + def test_kernel_security_inconsistent(self, mgr): + + bad_node = mgr.cache.facts['node-3.ceph.com'] + bad_node['kernel_security'] = { + "SELINUX": "permissive", + "SELINUXTYPE": "targeted", + "description": "SELinux: Enabled(permissive, targeted)", + "type": "SELinux" + } + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + assert len(mgr.health_checks) == 1 + assert 'CEPHADM_CHECK_KERNEL_LSM' in mgr.health_checks + assert mgr.health_checks['CEPHADM_CHECK_KERNEL_LSM']['detail'][0] == \ + "node-3.ceph.com has inconsistent KSM settings compared to the majority of hosts(9) in the cluster" + + def test_release_and_bad_mtu(self, mgr): + + mgr.version_overrides = { + "osd.1": "pacific", + } + bad_node = mgr.cache.facts['node-1.ceph.com'] + bad_node['interfaces']['eth0']['mtu'] = 1500 + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert mgr.health_checks + assert len(mgr.health_checks) == 2 + assert "CEPHADM_CHECK_CEPH_RELEASE" in mgr.health_checks and \ + "CEPHADM_CHECK_MTU" in mgr.health_checks + + def test_release_mtu_LSM(self, mgr): + + mgr.version_overrides = { + "osd.1": "pacific", + } + bad_node1 = 
mgr.cache.facts['node-1.ceph.com'] + bad_node1['interfaces']['eth0']['mtu'] = 1500 + bad_node2 = mgr.cache.facts['node-3.ceph.com'] + bad_node2['kernel_security'] = { + "SELINUX": "permissive", + "SELINUXTYPE": "targeted", + "description": "SELinux: Enabled(permissive, targeted)", + "type": "SELinux" + } + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert mgr.health_checks + assert len(mgr.health_checks) == 3 + assert \ + "CEPHADM_CHECK_CEPH_RELEASE" in mgr.health_checks and \ + "CEPHADM_CHECK_MTU" in mgr.health_checks and \ + "CEPHADM_CHECK_KERNEL_LSM" in mgr.health_checks + + def test_release_mtu_LSM_subscription(self, mgr): + + mgr.version_overrides = { + "osd.1": "pacific", + } + bad_node1 = mgr.cache.facts['node-1.ceph.com'] + bad_node1['interfaces']['eth0']['mtu'] = 1500 + bad_node1['subscribed'] = "no" + bad_node2 = mgr.cache.facts['node-3.ceph.com'] + bad_node2['kernel_security'] = { + "SELINUX": "permissive", + "SELINUXTYPE": "targeted", + "description": "SELinux: Enabled(permissive, targeted)", + "type": "SELinux" + } + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(json.dumps(mgr.health_checks)) + logger.info(checker.subnet_lookup) + assert mgr.health_checks + assert len(mgr.health_checks) == 4 + assert \ + "CEPHADM_CHECK_CEPH_RELEASE" in mgr.health_checks and \ + "CEPHADM_CHECK_MTU" in mgr.health_checks and \ + "CEPHADM_CHECK_KERNEL_LSM" in mgr.health_checks and \ + "CEPHADM_CHECK_SUBSCRIPTION" in mgr.health_checks + + def test_skip_release_during_upgrade(self, mgr): + mgr.upgrade.upgrade_state = UpgradeState.from_json({ + 'target_name': 'wah', + 'progress_id': str(uuid.uuid4()), + 'target_id': 'wah', + 'error': '', + 'paused': False, + }) + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(f"{checker.skipped_checks_count} skipped check(s): {checker.skipped_checks}") + assert checker.skipped_checks_count == 1 + assert 'ceph_release' in checker.skipped_checks + + def test_skip_when_disabled(self, mgr): + mgr.module_option.update({ + "config_checks_enabled": "false" + }) + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(checker.active_checks) + logger.info(checker.defined_checks) + assert checker.active_checks_count == 0 + + def test_skip_mtu_checks(self, mgr): + mgr.datastore.update({ + 'config_checks': '{"osd_mtu_size": "disabled"}' + }) + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(checker.active_checks) + logger.info(checker.defined_checks) + assert 'osd_mtu_size' not in checker.active_checks + assert checker.defined_checks == 8 and checker.active_checks_count == 7 + + def test_skip_mtu_lsm_checks(self, mgr): + mgr.datastore.update({ + 'config_checks': '{"osd_mtu_size": "disabled", "kernel_security": "disabled"}' + }) + + checker = CephadmConfigChecks(mgr) + checker.cluster_network_list = [] + checker.public_network_list = ['10.9.64.0/24'] + + checker.run_checks() + logger.info(checker.active_checks) + logger.info(checker.defined_checks) + assert 
'osd_mtu_size' not in checker.active_checks and \ + 'kernel_security' not in checker.active_checks + assert checker.defined_checks == 8 and checker.active_checks_count == 6 + assert not mgr.health_checks diff --git a/src/pybind/mgr/cephadm/tests/test_facts.py b/src/pybind/mgr/cephadm/tests/test_facts.py new file mode 100644 index 000000000..7838ee5d4 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_facts.py @@ -0,0 +1,31 @@ +from ..import CephadmOrchestrator + +from .fixtures import wait + +from tests import mock + + +def test_facts(cephadm_module: CephadmOrchestrator): + facts = {'node-1.ceph.com': {'bios_version': 'F2', 'cpu_cores': 16}} + cephadm_module.cache.facts = facts + ret_facts = cephadm_module.get_facts('node-1.ceph.com') + assert wait(cephadm_module, ret_facts) == [{'bios_version': 'F2', 'cpu_cores': 16}] + + +@mock.patch("cephadm.inventory.Inventory.update_known_hostnames") +def test_known_hostnames(_update_known_hostnames, cephadm_module: CephadmOrchestrator): + host_facts = {'hostname': 'host1.domain', + 'shortname': 'host1', + 'fqdn': 'host1.domain', + 'memory_free_kb': 37383384, + 'memory_total_kb': 40980612, + 'nic_count': 2} + cephadm_module.cache.update_host_facts('host1', host_facts) + _update_known_hostnames.assert_called_with('host1.domain', 'host1', 'host1.domain') + + host_facts = {'hostname': 'host1.domain', + 'memory_free_kb': 37383384, + 'memory_total_kb': 40980612, + 'nic_count': 2} + cephadm_module.cache.update_host_facts('host1', host_facts) + _update_known_hostnames.assert_called_with('host1.domain', '', '') diff --git a/src/pybind/mgr/cephadm/tests/test_migration.py b/src/pybind/mgr/cephadm/tests/test_migration.py new file mode 100644 index 000000000..1f1d32e8b --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_migration.py @@ -0,0 +1,340 @@ +import json +import pytest + +from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec +from ceph.utils import datetime_to_str, datetime_now +from cephadm import CephadmOrchestrator +from cephadm.inventory import SPEC_STORE_PREFIX +from cephadm.migrations import LAST_MIGRATION +from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts +from cephadm.serve import CephadmServe +from tests import mock + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_scheduler(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1', refresh_hosts=False): + with with_host(cephadm_module, 'host2', refresh_hosts=False): + + # emulate the old scheduler: + c = cephadm_module.apply_rgw( + ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2)) + ) + assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...' + + # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."): + CephadmServe(cephadm_module)._apply_all_services() + + cephadm_module.migration_current = 0 + cephadm_module.migration.migrate() + # assert we need all daemons. 
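+ # (migrate() is expected to be a no-op at this point: daemon metadata has
+ # not been refreshed for these hosts yet, so migration_current stays at 0)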
+ assert cephadm_module.migration_current == 0 + + CephadmServe(cephadm_module)._refresh_hosts_and_daemons() + receive_agent_metadata_all_hosts(cephadm_module) + cephadm_module.migration.migrate() + + CephadmServe(cephadm_module)._apply_all_services() + + out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())} + assert out == {'host1', 'host2'} + + c = cephadm_module.apply_rgw( + ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2)) + ) + assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...' + + # Sorry, for this hack, but I need to make sure, Migration thinks, + # we have updated all daemons already. + cephadm_module.cache.last_daemon_update['host1'] = datetime_now() + cephadm_module.cache.last_daemon_update['host2'] = datetime_now() + + cephadm_module.migration_current = 0 + cephadm_module.migration.migrate() + assert cephadm_module.migration_current >= 2 + + out = [o.spec.placement for o in wait( + cephadm_module, cephadm_module.describe_service())] + assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec( + hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])] + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({ + 'spec': { + 'service_type': 'mon', + 'service_id': 'wrong', + 'placement': { + 'hosts': ['host1'] + } + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + + cephadm_module.spec_store.load() + + assert len(cephadm_module.spec_store.all_specs) == 1 + assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon' + + cephadm_module.migration_current = 1 + cephadm_module.migration.migrate() + assert cephadm_module.migration_current >= 2 + + assert len(cephadm_module.spec_store.all_specs) == 1 + assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec( + service_type='mon', + unmanaged=True, + placement=PlacementSpec(hosts=['host1']) + ) + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({ + 'spec': { + 'service_type': 'mon', + 'placement': { + 'count': 5, + } + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({ + 'spec': { + 'service_type': 'mon', + 'service_id': 'wrong', + 'placement': { + 'hosts': ['host1'] + } + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + + cephadm_module.spec_store.load() + + assert len(cephadm_module.spec_store.all_specs) == 2 + assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon' + assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon' + + cephadm_module.migration_current = 1 + cephadm_module.migration.migrate() + assert cephadm_module.migration_current >= 2 + + assert len(cephadm_module.spec_store.all_specs) == 1 + assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec( + service_type='mon', + unmanaged=True, + placement=PlacementSpec(count=5) + ) + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_service_id_mds_one(cephadm_module: 
CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({ + 'spec': { + 'service_type': 'mds', + 'placement': { + 'hosts': ['host1'] + } + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + + cephadm_module.spec_store.load() + + # there is nothing to migrate, as the spec is gone now. + assert len(cephadm_module.spec_store.all_specs) == 0 + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store( + SPEC_STORE_PREFIX + 'mds', + json.dumps({ + 'spec': { + 'service_type': 'nfs', + 'service_id': 'foo', + 'placement': { + 'hosts': ['host1'] + }, + 'spec': { + 'pool': 'mypool', + 'namespace': 'foons', + }, + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + cephadm_module.migration_current = 1 + cephadm_module.spec_store.load() + + ls = json.loads(cephadm_module.get_store('nfs_migration_queue')) + assert ls == [['foo', 'mypool', 'foons']] + + cephadm_module.migration.migrate(True) + assert cephadm_module.migration_current == 2 + + cephadm_module.migration.migrate() + assert cephadm_module.migration_current == LAST_MIGRATION + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store( + SPEC_STORE_PREFIX + 'mds', + json.dumps({ + 'spec': { + 'service_type': 'nfs', + 'service_id': 'ganesha-foo', + 'placement': { + 'hosts': ['host1'] + }, + 'spec': { + 'pool': 'mypool', + 'namespace': 'foons', + }, + }, + 'created': datetime_to_str(datetime_now()), + }, sort_keys=True), + ) + cephadm_module.migration_current = 1 + cephadm_module.spec_store.load() + + ls = json.loads(cephadm_module.get_store('nfs_migration_queue')) + assert ls == [['ganesha-foo', 'mypool', 'foons']] + + cephadm_module.migration.migrate(True) + assert cephadm_module.migration_current == 2 + + cephadm_module.migration.migrate() + assert cephadm_module.migration_current == LAST_MIGRATION + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator): + assert 'client.admin' not in cephadm_module.keys.keys + + cephadm_module.migration_current = 3 + cephadm_module.migration.migrate() + assert cephadm_module.migration_current == LAST_MIGRATION + + assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin' + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_set_sane_value(cephadm_module: CephadmOrchestrator): + cephadm_module.migration_current = 0 + cephadm_module.migration.set_sane_migration_current() + assert cephadm_module.migration_current == 0 + + cephadm_module.migration_current = LAST_MIGRATION + cephadm_module.migration.set_sane_migration_current() + assert cephadm_module.migration_current == LAST_MIGRATION + + cephadm_module.migration_current = None + cephadm_module.migration.set_sane_migration_current() + assert cephadm_module.migration_current == LAST_MIGRATION + + cephadm_module.migration_current = LAST_MIGRATION + 1 + cephadm_module.migration.set_sane_migration_current() + assert cephadm_module.migration_current == 0 + + cephadm_module.migration_current = None + ongoing = cephadm_module.migration.is_migration_ongoing() + assert 
not ongoing + assert cephadm_module.migration_current == LAST_MIGRATION + + cephadm_module.migration_current = LAST_MIGRATION + 1 + ongoing = cephadm_module.migration.is_migration_ongoing() + assert ongoing + assert cephadm_module.migration_current == 0 + + +@pytest.mark.parametrize( + "rgw_spec_store_entry, should_migrate", + [ + ({ + 'spec': { + 'service_type': 'rgw', + 'service_name': 'rgw.foo', + 'service_id': 'foo', + 'placement': { + 'hosts': ['host1'] + }, + 'spec': { + 'rgw_frontend_type': 'beast tcp_nodelay=1 request_timeout_ms=65000 rgw_thread_pool_size=512', + 'rgw_frontend_port': '5000', + }, + }, + 'created': datetime_to_str(datetime_now()), + }, True), + ({ + 'spec': { + 'service_type': 'rgw', + 'service_name': 'rgw.foo', + 'service_id': 'foo', + 'placement': { + 'hosts': ['host1'] + }, + }, + 'created': datetime_to_str(datetime_now()), + }, False), + ] +) +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) +def test_migrate_rgw_spec(cephadm_module: CephadmOrchestrator, rgw_spec_store_entry, should_migrate): + with with_host(cephadm_module, 'host1'): + cephadm_module.set_store( + SPEC_STORE_PREFIX + 'rgw', + json.dumps(rgw_spec_store_entry, sort_keys=True), + ) + + # make sure rgw_migration_queue is populated accordingly + cephadm_module.migration_current = 1 + cephadm_module.spec_store.load() + ls = json.loads(cephadm_module.get_store('rgw_migration_queue')) + assert 'rgw' == ls[0]['spec']['service_type'] + + # shortcut rgw_migration_queue loading by directly assigning + # ls output to rgw_migration_queue list + cephadm_module.migration.rgw_migration_queue = ls + + # skip other migrations and go directly to 5_6 migration (RGW spec) + cephadm_module.migration_current = 5 + cephadm_module.migration.migrate() + assert cephadm_module.migration_current == LAST_MIGRATION + + if should_migrate: + # make sure the spec has been migrated and the the param=value entries + # that were part of the rgw_frontend_type are now in the new + # 'rgw_frontend_extra_args' list + assert 'rgw.foo' in cephadm_module.spec_store.all_specs + rgw_spec = cephadm_module.spec_store.all_specs['rgw.foo'] + assert dict(rgw_spec.to_json()) == {'service_type': 'rgw', + 'service_id': 'foo', + 'service_name': 'rgw.foo', + 'placement': {'hosts': ['host1']}, + 'spec': { + 'rgw_frontend_extra_args': ['tcp_nodelay=1', + 'request_timeout_ms=65000', + 'rgw_thread_pool_size=512'], + 'rgw_frontend_port': '5000', + 'rgw_frontend_type': 'beast', + }} + else: + # in a real environment, we still expect the spec to be there, + # just untouched by the migration. 
For this test specifically + # though, the spec will only have ended up in the spec store + # if it was migrated, so we can use this to test the spec + # was untouched + assert 'rgw.foo' not in cephadm_module.spec_store.all_specs diff --git a/src/pybind/mgr/cephadm/tests/test_osd_removal.py b/src/pybind/mgr/cephadm/tests/test_osd_removal.py new file mode 100644 index 000000000..6685fcb2a --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_osd_removal.py @@ -0,0 +1,298 @@ +import json + +from cephadm.services.osd import OSDRemovalQueue, OSD +import pytest +from tests import mock +from .fixtures import with_cephadm_module +from datetime import datetime + + +class MockOSD: + + def __init__(self, osd_id): + self.osd_id = osd_id + + +class TestOSDRemoval: + + @pytest.mark.parametrize( + "osd_id, osd_df, expected", + [ + # missing 'nodes' key + (1, dict(nodes=[]), -1), + # missing 'pgs' key + (1, dict(nodes=[dict(id=1)]), -1), + # id != osd_id + (1, dict(nodes=[dict(id=999, pgs=1)]), -1), + # valid + (1, dict(nodes=[dict(id=1, pgs=1)]), 1), + ] + ) + def test_get_pg_count(self, rm_util, osd_id, osd_df, expected): + with mock.patch("cephadm.services.osd.RemoveUtil.osd_df", return_value=osd_df): + assert rm_util.get_pg_count(osd_id) == expected + + @pytest.mark.parametrize( + "osds, ok_to_stop, expected", + [ + # no osd_ids provided + ([], [False], []), + # all osds are ok_to_stop + ([1, 2], [True], [1, 2]), + # osds are ok_to_stop after the second iteration + ([1, 2], [False, True], [2]), + # osds are never ok_to_stop, (taking the sample size `(len(osd_ids))` into account), + # expected to get False + ([1, 2], [False, False], []), + ] + ) + def test_find_stop_threshold(self, rm_util, osds, ok_to_stop, expected): + with mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop", side_effect=ok_to_stop): + assert rm_util.find_osd_stop_threshold(osds) == expected + + def test_process_removal_queue(self, rm_util): + # TODO: ! 
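+ # a hypothetical sketch, assuming the OSDRemovalQueue API used elsewhere
+ # in this file and the usual mock-patching style of this class:
+ #     with mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop", return_value=True):
+ #         OSDRemovalQueue(mock.Mock()).process_removal_queue()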
+ # rm_util.process_removal_queue() + pass + + @pytest.mark.parametrize( + "max_osd_draining_count, draining_osds, idling_osds, ok_to_stop, expected", + [ + # drain one at a time, one already draining + (1, [1], [1], [True], 0), + # drain one at a time, none draining yet + (1, [], [1, 2, 3], [True, True, True], 1), + # drain one at a time, one already draining, none ok-to-stop + (1, [1], [1], [False], 0), + # drain one at a time, none draining, one ok-to-stop + (1, [], [1, 2, 3], [False, False, True], 1), + # drain three at a time, one already draining, all ok-to-stop + (3, [1], [1, 2, 3], [True, True, True], 2), + # drain two at a time, none already draining, none ok-to-stop + (2, [], [1, 2, 3], [False, False, False], 0), + # drain two at a time, none already draining, none idling + (2, [], [], [], 0), + ] + ) + def test_ready_to_drain_osds(self, max_osd_draining_count, draining_osds, idling_osds, ok_to_stop, expected): + with with_cephadm_module({'max_osd_draining_count': max_osd_draining_count}) as m: + with mock.patch("cephadm.services.osd.OSDRemovalQueue.draining_osds", return_value=draining_osds): + with mock.patch("cephadm.services.osd.OSDRemovalQueue.idling_osds", return_value=idling_osds): + with mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop", side_effect=ok_to_stop): + removal_queue = OSDRemovalQueue(m) + assert len(removal_queue._ready_to_drain_osds()) == expected + + def test_ok_to_stop(self, rm_util): + rm_util.ok_to_stop([MockOSD(1)]) + rm_util._run_mon_cmd.assert_called_with({'prefix': 'osd ok-to-stop', 'ids': ['1']}, + error_ok=True) + + def test_safe_to_destroy(self, rm_util): + rm_util.safe_to_destroy([1]) + rm_util._run_mon_cmd.assert_called_with({'prefix': 'osd safe-to-destroy', + 'ids': ['1']}, error_ok=True) + + def test_destroy_osd(self, rm_util): + rm_util.destroy_osd(1) + rm_util._run_mon_cmd.assert_called_with( + {'prefix': 'osd destroy-actual', 'id': 1, 'yes_i_really_mean_it': True}) + + def test_purge_osd(self, rm_util): + rm_util.purge_osd(1) + rm_util._run_mon_cmd.assert_called_with( + {'prefix': 'osd purge-actual', 'id': 1, 'yes_i_really_mean_it': True}) + + def test_load(self, cephadm_module, rm_util): + data = json.dumps([ + { + "osd_id": 35, + "started": True, + "draining": True, + "stopped": False, + "replace": False, + "force": False, + "zap": False, + "nodename": "node2", + "drain_started_at": "2020-09-14T11:41:53.960463", + "drain_stopped_at": None, + "drain_done_at": None, + "process_started_at": "2020-09-14T11:41:52.245832" + } + ]) + cephadm_module.set_store('osd_remove_queue', data) + cephadm_module.to_remove_osds.load_from_store() + + expected = OSDRemovalQueue(cephadm_module) + expected.osds.add(OSD(osd_id=35, remove_util=rm_util, draining=True)) + assert cephadm_module.to_remove_osds == expected + + +class TestOSD: + + def test_start(self, osd_obj): + assert osd_obj.started is False + osd_obj.start() + assert osd_obj.started is True + assert osd_obj.stopped is False + + def test_start_draining_purge(self, osd_obj): + assert osd_obj.draining is False + assert osd_obj.drain_started_at is None + ret = osd_obj.start_draining() + osd_obj.rm_util.reweight_osd.assert_called_with(osd_obj, 0.0) + assert isinstance(osd_obj.drain_started_at, datetime) + assert osd_obj.draining is True + assert osd_obj.replace is False + assert ret is True + + def test_start_draining_replace(self, osd_obj): + assert osd_obj.draining is False + assert osd_obj.drain_started_at is None + osd_obj.replace = True + ret = osd_obj.start_draining() +
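# with replace=True, draining marks the OSD 'out' instead of reweighting + # it to 0 (contrast test_start_draining_purge above) +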
osd_obj.rm_util.set_osd_flag.assert_called_with([osd_obj], 'out') + assert isinstance(osd_obj.drain_started_at, datetime) + assert osd_obj.draining is True + assert osd_obj.replace is True + assert ret is True + + def test_start_draining_stopped(self, osd_obj): + osd_obj.stopped = True + ret = osd_obj.start_draining() + assert osd_obj.drain_started_at is None + assert ret is False + assert osd_obj.draining is False + + def test_stop_draining_replace(self, osd_obj): + osd_obj.replace = True + ret = osd_obj.stop_draining() + osd_obj.rm_util.set_osd_flag.assert_called_with([osd_obj], 'in') + assert isinstance(osd_obj.drain_stopped_at, datetime) + assert osd_obj.draining is False + assert ret is True + + def test_stop_draining_purge(self, osd_obj): + osd_obj.original_weight = 1.0 + ret = osd_obj.stop_draining() + osd_obj.rm_util.reweight_osd.assert_called_with(osd_obj, 1.0) + assert isinstance(osd_obj.drain_stopped_at, datetime) + assert osd_obj.draining is False + assert ret is True + + @mock.patch('cephadm.services.osd.OSD.stop_draining') + def test_stop(self, stop_draining_mock, osd_obj): + osd_obj.stop() + assert osd_obj.started is False + assert osd_obj.stopped is True + stop_draining_mock.assert_called_once() + + @pytest.mark.parametrize( + "draining, empty, expected", + [ + # must be !draining! and !not empty! to yield True + (True, not True, True), + # not draining and not empty + (False, not True, False), + # not draining and empty + (False, True, False), + # draining and empty + (True, True, False), + ] + ) + def test_is_draining(self, osd_obj, draining, empty, expected): + with mock.patch("cephadm.services.osd.OSD.is_empty", new_callable=mock.PropertyMock(return_value=empty)): + osd_obj.draining = draining + assert osd_obj.is_draining is expected + + @mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop") + def test_is_ok_to_stop(self, _, osd_obj): + osd_obj.is_ok_to_stop + osd_obj.rm_util.ok_to_stop.assert_called_once() + + @pytest.mark.parametrize( + "pg_count, expected", + [ + (0, True), + (1, False), + (9999, False), + (-1, False), + ] + ) + def test_is_empty(self, osd_obj, pg_count, expected): + with mock.patch("cephadm.services.osd.OSD.get_pg_count", return_value=pg_count): + assert osd_obj.is_empty is expected + + @mock.patch("cephadm.services.osd.RemoveUtil.safe_to_destroy") + def test_safe_to_destroy(self, _, osd_obj): + osd_obj.safe_to_destroy() + osd_obj.rm_util.safe_to_destroy.assert_called_once() + + @mock.patch("cephadm.services.osd.RemoveUtil.set_osd_flag") + def test_down(self, _, osd_obj): + osd_obj.down() + osd_obj.rm_util.set_osd_flag.assert_called_with([osd_obj], 'down') + + @mock.patch("cephadm.services.osd.RemoveUtil.destroy_osd") + def test_destroy_osd(self, _, osd_obj): + osd_obj.destroy() + osd_obj.rm_util.destroy_osd.assert_called_once() + + @mock.patch("cephadm.services.osd.RemoveUtil.purge_osd") + def test_purge(self, _, osd_obj): + osd_obj.purge() + osd_obj.rm_util.purge_osd.assert_called_once() + + @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count") + def test_pg_count(self, _, osd_obj): + osd_obj.get_pg_count() + osd_obj.rm_util.get_pg_count.assert_called_once() + + def test_drain_status_human_not_started(self, osd_obj): + assert osd_obj.drain_status_human() == 'not started' + + def test_drain_status_human_started(self, osd_obj): + osd_obj.started = True + assert osd_obj.drain_status_human() == 'started' + + def test_drain_status_human_draining(self, osd_obj): + osd_obj.started = True + osd_obj.draining = True + assert 
osd_obj.drain_status_human() == 'draining' + + def test_drain_status_human_done(self, osd_obj): + osd_obj.started = True + osd_obj.draining = False + osd_obj.drain_done_at = datetime.utcnow() + assert osd_obj.drain_status_human() == 'done, waiting for purge' + + +class TestOSDRemovalQueue: + + def test_queue_size(self, osd_obj): + q = OSDRemovalQueue(mock.Mock()) + assert q.queue_size() == 0 + q.osds.add(osd_obj) + assert q.queue_size() == 1 + + @mock.patch("cephadm.services.osd.OSD.start") + @mock.patch("cephadm.services.osd.OSD.exists") + def test_enqueue(self, exist, start, osd_obj): + q = OSDRemovalQueue(mock.Mock()) + q.enqueue(osd_obj) + osd_obj.start.assert_called_once() + + @mock.patch("cephadm.services.osd.OSD.stop") + @mock.patch("cephadm.services.osd.OSD.exists") + def test_rm_raise(self, exist, stop, osd_obj): + q = OSDRemovalQueue(mock.Mock()) + with pytest.raises(KeyError): + q.rm(osd_obj) + osd_obj.stop.assert_called_once() + + @mock.patch("cephadm.services.osd.OSD.stop") + @mock.patch("cephadm.services.osd.OSD.exists") + def test_rm(self, exist, stop, osd_obj): + q = OSDRemovalQueue(mock.Mock()) + q.osds.add(osd_obj) + q.rm(osd_obj) + osd_obj.stop.assert_called_once() diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py new file mode 100644 index 000000000..067cd5028 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py @@ -0,0 +1,1699 @@ +# Disable autopep8 for this file: + +# fmt: off + +from typing import NamedTuple, List, Dict, Optional +import pytest + +from ceph.deployment.hostspec import HostSpec +from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, IngressSpec +from ceph.deployment.hostspec import SpecValidationError + +from cephadm.module import HostAssignment +from cephadm.schedule import DaemonPlacement +from orchestrator import DaemonDescription, OrchestratorValidationError, OrchestratorError + + +def wrapper(func): + # some odd thingy to revert the order or arguments + def inner(*args): + def inner2(expected): + func(expected, *args) + return inner2 + return inner + + +@wrapper +def none(expected): + assert expected == [] + + +@wrapper +def one_of(expected, *hosts): + if not isinstance(expected, list): + assert False, str(expected) + assert len(expected) == 1, f'one_of failed len({expected}) != 1' + assert expected[0] in hosts + + +@wrapper +def two_of(expected, *hosts): + if not isinstance(expected, list): + assert False, str(expected) + assert len(expected) == 2, f'one_of failed len({expected}) != 2' + matches = 0 + for h in hosts: + matches += int(h in expected) + if matches != 2: + assert False, f'two of {hosts} not in {expected}' + + +@wrapper +def exactly(expected, *hosts): + assert expected == list(hosts) + + +@wrapper +def error(expected, kind, match): + assert isinstance(expected, kind), (str(expected), match) + assert str(expected) == match, (str(expected), match) + + +@wrapper +def _or(expected, *inners): + def catch(inner): + try: + inner(expected) + except AssertionError as e: + return e + result = [catch(i) for i in inners] + if None not in result: + assert False, f"_or failed: {expected}" + + +def _always_true(_): + pass + + +def k(s): + return [e for e in s.split(' ') if e] + + +def get_result(key, results): + def match(one): + for o, k in zip(one, key): + if o != k and o != '*': + return False + return True + return [v for k, v in results if match(k)][0] + + +def mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count): + + if spec_section == 
'hosts': + mk_spec = lambda: ServiceSpec('mgr', placement=PlacementSpec( # noqa: E731 + hosts=explicit, + count=count, + )) + elif spec_section == 'label': + mk_spec = lambda: ServiceSpec('mgr', placement=PlacementSpec( # noqa: E731 + label='mylabel', + count=count, + )) + elif spec_section == 'host_pattern': + pattern = { + 'e': 'notfound', + '1': '1', + '12': '[1-2]', + '123': '*', + }[explicit_key] + mk_spec = lambda: ServiceSpec('mgr', placement=PlacementSpec( # noqa: E731 + host_pattern=pattern, + count=count, + )) + else: + assert False + + hosts = [ + HostSpec(h, labels=['mylabel']) if h in explicit else HostSpec(h) + for h in hosts + ] + + return mk_spec, hosts + + +def run_scheduler_test(results, mk_spec, hosts, daemons, key_elems): + key = ' '.join('N' if e is None else str(e) for e in key_elems) + try: + assert_res = get_result(k(key), results) + except IndexError: + try: + spec = mk_spec() + host_res, to_add, to_remove = HostAssignment( + spec=spec, + hosts=hosts, + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + ).place() + if isinstance(host_res, list): + e = ', '.join(repr(h.hostname) for h in host_res) + assert False, f'`(k("{key}"), exactly({e})),` not found' + assert False, f'`(k("{key}"), ...),` not found' + except OrchestratorError as e: + assert False, f'`(k("{key}"), error({type(e).__name__}, {repr(str(e))})),` not found' + + for _ in range(10): # scheduler has a random component + try: + spec = mk_spec() + host_res, to_add, to_remove = HostAssignment( + spec=spec, + hosts=hosts, + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons + ).place() + + assert_res(sorted([h.hostname for h in host_res])) + except Exception as e: + assert_res(e) + + +@pytest.mark.parametrize("dp,n,result", + [ # noqa: E128 + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + 0, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + 2, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82]), + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80, 90]), + 2, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82, 92]), + ), + ]) +def test_daemon_placement_renumber(dp, n, result): + assert dp.renumber_ports(n) == result + + +@pytest.mark.parametrize( + 'dp,dd,result', + [ + ( + DaemonPlacement(daemon_type='mgr', hostname='host1'), + DaemonDescription('mgr', 'a', 'host1'), + True + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', name='a'), + DaemonDescription('mgr', 'a', 'host1'), + True + ), + ( + DaemonPlacement(daemon_type='mon', hostname='host1', name='a'), + DaemonDescription('mgr', 'a', 'host1'), + False + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', name='a'), + DaemonDescription('mgr', 'b', 'host1'), + False + ), + ]) +def test_daemon_placement_match(dp, dd, result): + assert dp.matches_daemon(dd) == result + + +# * first match from the top wins +# * where e=[], *=any +# +# + list of known hosts available for scheduling (host_key) +# | + hosts used for explict placement (explicit_key) +# | | + count +# | | | + section (host, label, pattern) +# | | | | + expected result +# | | | | | +test_explicit_scheduler_results = [ + (k("* * 0 *"), error(SpecValidationError, 'num/count must be >= 1')), + (k("* e N l"), error(OrchestratorValidationError, 'Cannot place : No matching hosts for label mylabel')), + (k("* e N p"), error(OrchestratorValidationError, 'Cannot place : No matching hosts')), 
+ (k("* e N h"), error(OrchestratorValidationError, 'placement spec is empty: no hosts, no label, no pattern, no count')), + (k("* e * *"), none), + (k("1 12 * h"), error(OrchestratorValidationError, "Cannot place on 2: Unknown hosts")), + (k("1 123 * h"), error(OrchestratorValidationError, "Cannot place on 2, 3: Unknown hosts")), + (k("1 * * *"), exactly('1')), + (k("12 1 * *"), exactly('1')), + (k("12 12 1 *"), one_of('1', '2')), + (k("12 12 * *"), exactly('1', '2')), + (k("12 123 * h"), error(OrchestratorValidationError, "Cannot place on 3: Unknown hosts")), + (k("12 123 1 *"), one_of('1', '2', '3')), + (k("12 123 * *"), two_of('1', '2', '3')), + (k("123 1 * *"), exactly('1')), + (k("123 12 1 *"), one_of('1', '2')), + (k("123 12 * *"), exactly('1', '2')), + (k("123 123 1 *"), one_of('1', '2', '3')), + (k("123 123 2 *"), two_of('1', '2', '3')), + (k("123 123 * *"), exactly('1', '2', '3')), +] + + +@pytest.mark.parametrize("spec_section_key,spec_section", + [ # noqa: E128 + ('h', 'hosts'), + ('l', 'label'), + ('p', 'host_pattern'), + ]) +@pytest.mark.parametrize("count", + [ # noqa: E128 + None, + 0, + 1, + 2, + 3, + ]) +@pytest.mark.parametrize("explicit_key, explicit", + [ # noqa: E128 + ('e', []), + ('1', ['1']), + ('12', ['1', '2']), + ('123', ['1', '2', '3']), + ]) +@pytest.mark.parametrize("host_key, hosts", + [ # noqa: E128 + ('1', ['1']), + ('12', ['1', '2']), + ('123', ['1', '2', '3']), + ]) +def test_explicit_scheduler(host_key, hosts, + explicit_key, explicit, + count, + spec_section_key, spec_section): + + mk_spec, hosts = mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count) + run_scheduler_test( + results=test_explicit_scheduler_results, + mk_spec=mk_spec, + hosts=hosts, + daemons=[], + key_elems=(host_key, explicit_key, count, spec_section_key) + ) + + +# * first match from the top wins +# * where e=[], *=any +# +# + list of known hosts available for scheduling (host_key) +# | + hosts used for explicit placement (explicit_key) +# | | + count +# | | | + existing daemons +# | | | | + section (host, label, pattern) +# | | | | | + expected result +# | | | | | | +test_scheduler_daemons_results = [ + (k("* 1 * * *"), exactly('1')), + (k("1 123 * * h"), error(OrchestratorValidationError, 'Cannot place on 2, 3: Unknown hosts')), + (k("1 123 * * *"), exactly('1')), + (k("12 123 * * h"), error(OrchestratorValidationError, 'Cannot place on 3: Unknown hosts')), + (k("12 123 N * *"), exactly('1', '2')), + (k("12 123 1 * *"), one_of('1', '2')), + (k("12 123 2 * *"), exactly('1', '2')), + (k("12 123 3 * *"), exactly('1', '2')), + (k("123 123 N * *"), exactly('1', '2', '3')), + (k("123 123 1 e *"), one_of('1', '2', '3')), + (k("123 123 1 1 *"), exactly('1')), + (k("123 123 1 3 *"), exactly('3')), + (k("123 123 1 12 *"), one_of('1', '2')), + (k("123 123 1 112 *"), one_of('1', '2')), + (k("123 123 1 23 *"), one_of('2', '3')), + (k("123 123 1 123 *"), one_of('1', '2', '3')), + (k("123 123 2 e *"), two_of('1', '2', '3')), + (k("123 123 2 1 *"), _or(exactly('1', '2'), exactly('1', '3'))), + (k("123 123 2 3 *"), _or(exactly('1', '3'), exactly('2', '3'))), + (k("123 123 2 12 *"), exactly('1', '2')), + (k("123 123 2 112 *"), exactly('1', '2')), + (k("123 123 2 23 *"), exactly('2', '3')), + (k("123 123 2 123 *"), two_of('1', '2', '3')), + (k("123 123 3 * *"), exactly('1', '2', '3')), +] + + +@pytest.mark.parametrize("spec_section_key,spec_section", + [ # noqa: E128 + ('h', 'hosts'), + ('l', 'label'), + ('p', 'host_pattern'), + ]) +@pytest.mark.parametrize("daemons_key, daemons", + [ 
# noqa: E128
+        ('e', []),
+        ('1', ['1']),
+        ('3', ['3']),
+        ('12', ['1', '2']),
+        ('112', ['1', '1', '2']),  # deal with existing co-located daemons
+        ('23', ['2', '3']),
+        ('123', ['1', '2', '3']),
+    ])
+@pytest.mark.parametrize("count",
+    [   # noqa: E128
+        None,
+        1,
+        2,
+        3,
+    ])
+@pytest.mark.parametrize("explicit_key, explicit",
+    [   # noqa: E128
+        ('1', ['1']),
+        ('123', ['1', '2', '3']),
+    ])
+@pytest.mark.parametrize("host_key, hosts",
+    [   # noqa: E128
+        ('1', ['1']),
+        ('12', ['1', '2']),
+        ('123', ['1', '2', '3']),
+    ])
+def test_scheduler_daemons(host_key, hosts,
+                           explicit_key, explicit,
+                           count,
+                           daemons_key, daemons,
+                           spec_section_key, spec_section):
+    mk_spec, hosts = mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count)
+    dds = [
+        DaemonDescription('mgr', d, d)
+        for d in daemons
+    ]
+    run_scheduler_test(
+        results=test_scheduler_daemons_results,
+        mk_spec=mk_spec,
+        hosts=hosts,
+        daemons=dds,
+        key_elems=(host_key, explicit_key, count, daemons_key, spec_section_key)
+    )
+
+
+# =========================
+
+
+class NodeAssignmentTest(NamedTuple):
+    service_type: str
+    placement: PlacementSpec
+    hosts: List[str]
+    daemons: List[DaemonDescription]
+    rank_map: Optional[Dict[int, Dict[int, Optional[str]]]]
+    post_rank_map: Optional[Dict[int, Dict[int, Optional[str]]]]
+    expected: List[str]
+    expected_add: List[str]
+    expected_remove: List[DaemonDescription]
+
+
+@pytest.mark.parametrize("service_type,placement,hosts,daemons,rank_map,post_rank_map,expected,expected_add,expected_remove",
+    [   # noqa: E128
+        # just hosts
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(hosts=['smithi060']),
+            ['smithi060'],
+            [],
+            None, None,
+            ['mgr:smithi060'], ['mgr:smithi060'], []
+        ),
+        # all_hosts
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(host_pattern='*'),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+            ],
+            None, None,
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            ['mgr:host3'],
+            []
+        ),
+        # all_hosts + count_per_host
+        NodeAssignmentTest(
+            'mds',
+            PlacementSpec(host_pattern='*', count_per_host=2),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mds', 'a', 'host1'),
+                DaemonDescription('mds', 'b', 'host2'),
+            ],
+            None, None,
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            ['mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            []
+        ),
+        # count that is bigger than the number of hosts. Truncate to len(hosts)
+        # (mgr daemons should not be co-located with each other)
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=4),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            []
+        ),
+        # count that is bigger than the number of hosts; wrap around.
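+        # (mds allows co-location, so the six slots below wrap to two
+        # daemons per host instead of truncating as in the mgr case above)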
+        NodeAssignmentTest(
+            'mds',
+            PlacementSpec(count=6),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            []
+        ),
+        # count + partial host list
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=3, hosts=['host3']),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+            ],
+            None, None,
+            ['mgr:host3'],
+            ['mgr:host3'],
+            ['mgr.a', 'mgr.b']
+        ),
+        # count + partial host list (with colo)
+        NodeAssignmentTest(
+            'mds',
+            PlacementSpec(count=3, hosts=['host3']),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mds', 'a', 'host1'),
+                DaemonDescription('mds', 'b', 'host2'),
+            ],
+            None, None,
+            ['mds:host3', 'mds:host3', 'mds:host3'],
+            ['mds:host3', 'mds:host3', 'mds:host3'],
+            ['mds.a', 'mds.b']
+        ),
+        # count 1 + partial host list
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=1, hosts=['host3']),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+            ],
+            None, None,
+            ['mgr:host3'],
+            ['mgr:host3'],
+            ['mgr.a', 'mgr.b']
+        ),
+        # count + partial host list + existing
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=2, hosts=['host3']),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+            ],
+            None, None,
+            ['mgr:host3'],
+            ['mgr:host3'],
+            ['mgr.a']
+        ),
+        # count + partial host list + existing (deterministic)
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=2, hosts=['host1']),
+            'host1 host2'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+            ],
+            None, None,
+            ['mgr:host1'],
+            [],
+            []
+        ),
+        # count + partial host list + existing (deterministic)
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=2, hosts=['host1']),
+            'host1 host2'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host2'),
+            ],
+            None, None,
+            ['mgr:host1'],
+            ['mgr:host1'],
+            ['mgr.a']
+        ),
+        # label only
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(label='foo'),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            []
+        ),
+        # label + count (truncate to host list)
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=4, label='foo'),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            ['mgr:host1', 'mgr:host2', 'mgr:host3'],
+            []
+        ),
+        # label + count (with colo)
+        NodeAssignmentTest(
+            'mds',
+            PlacementSpec(count=6, label='foo'),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3'],
+            []
+        ),
+        # label only + count_per_host
+        NodeAssignmentTest(
+            'mds',
+            PlacementSpec(label='foo', count_per_host=3),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3',
+             'mds:host1', 'mds:host2', 'mds:host3'],
+            ['mds:host1', 'mds:host2', 'mds:host3', 'mds:host1', 'mds:host2', 'mds:host3',
+             'mds:host1', 'mds:host2', 'mds:host3'],
+            []
+        ),
+        # host_pattern
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(host_pattern='mgr*'),
+            'mgrhost1 mgrhost2 datahost'.split(),
+            [],
+            None, None,
+            ['mgr:mgrhost1', 'mgr:mgrhost2'],
+            ['mgr:mgrhost1', 'mgr:mgrhost2'],
+            []
+        ),
+        # host_pattern + count_per_host
+        NodeAssignmentTest(
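+            # the pattern 'mds*' matches mdshost1 and mdshost2 but not
+            # datahost, and count_per_host=3 stacks three slots on each match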
+            'mds',
+            PlacementSpec(host_pattern='mds*', count_per_host=3),
+            'mdshost1 mdshost2 datahost'.split(),
+            [],
+            None, None,
+            ['mds:mdshost1', 'mds:mdshost2', 'mds:mdshost1', 'mds:mdshost2', 'mds:mdshost1', 'mds:mdshost2'],
+            ['mds:mdshost1', 'mds:mdshost2', 'mds:mdshost1', 'mds:mdshost2', 'mds:mdshost1', 'mds:mdshost2'],
+            []
+        ),
+        # label + count + ports
+        NodeAssignmentTest(
+            'rgw',
+            PlacementSpec(count=6, label='foo'),
+            'host1 host2 host3'.split(),
+            [],
+            None, None,
+            ['rgw:host1(*:80)', 'rgw:host2(*:80)', 'rgw:host3(*:80)',
+             'rgw:host1(*:81)', 'rgw:host2(*:81)', 'rgw:host3(*:81)'],
+            ['rgw:host1(*:80)', 'rgw:host2(*:80)', 'rgw:host3(*:80)',
+             'rgw:host1(*:81)', 'rgw:host2(*:81)', 'rgw:host3(*:81)'],
+            []
+        ),
+        # label + count + ports (+ existing)
+        NodeAssignmentTest(
+            'rgw',
+            PlacementSpec(count=6, label='foo'),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('rgw', 'a', 'host1', ports=[81]),
+                DaemonDescription('rgw', 'b', 'host2', ports=[80]),
+                DaemonDescription('rgw', 'c', 'host1', ports=[82]),
+            ],
+            None, None,
+            ['rgw:host1(*:80)', 'rgw:host2(*:80)', 'rgw:host3(*:80)',
+             'rgw:host1(*:81)', 'rgw:host2(*:81)', 'rgw:host3(*:81)'],
+            ['rgw:host1(*:80)', 'rgw:host3(*:80)',
+             'rgw:host2(*:81)', 'rgw:host3(*:81)'],
+            ['rgw.c']
+        ),
+        # cephadm.py teuth case
+        NodeAssignmentTest(
+            'mgr',
+            PlacementSpec(count=3, hosts=['host1=y', 'host2=x']),
+            'host1 host2'.split(),
+            [
+                DaemonDescription('mgr', 'y', 'host1'),
+                DaemonDescription('mgr', 'x', 'host2'),
+            ],
+            None, None,
+            ['mgr:host1(name=y)', 'mgr:host2(name=x)'],
+            [], []
+        ),
+
+        # note: host -> rank mapping is pseudo-random based on svc name, so these
+        # host/rank pairs may seem random but they match the nfs.mynfs seed used by
+        # the test.
+
+        # ranked, fresh
+        NodeAssignmentTest(
+            'nfs',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            [],
+            {},
+            {0: {0: None}, 1: {0: None}, 2: {0: None}},
+            ['nfs:host3(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host1(rank=2.0)'],
+            ['nfs:host3(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host1(rank=2.0)'],
+            []
+        ),
+        # 21: ranked, exist
+        NodeAssignmentTest(
+            'nfs',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('nfs', '0.1', 'host1', rank=0, rank_generation=1),
+            ],
+            {0: {1: '0.1'}},
+            {0: {1: '0.1'}, 1: {0: None}, 2: {0: None}},
+            ['nfs:host1(rank=0.1)', 'nfs:host3(rank=1.0)', 'nfs:host2(rank=2.0)'],
+            ['nfs:host3(rank=1.0)', 'nfs:host2(rank=2.0)'],
+            []
+        ),
+        # ranked, exist, different ranks
+        NodeAssignmentTest(
+            'nfs',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('nfs', '0.1', 'host1', rank=0, rank_generation=1),
+                DaemonDescription('nfs', '1.1', 'host2', rank=1, rank_generation=1),
+            ],
+            {0: {1: '0.1'}, 1: {1: '1.1'}},
+            {0: {1: '0.1'}, 1: {1: '1.1'}, 2: {0: None}},
+            ['nfs:host1(rank=0.1)', 'nfs:host2(rank=1.1)', 'nfs:host3(rank=2.0)'],
+            ['nfs:host3(rank=2.0)'],
+            []
+        ),
+        # ranked, exist, different ranks (2)
+        NodeAssignmentTest(
+            'nfs',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('nfs', '0.1', 'host1', rank=0, rank_generation=1),
+                DaemonDescription('nfs', '1.1', 'host3', rank=1, rank_generation=1),
+            ],
+            {0: {1: '0.1'}, 1: {1: '1.1'}},
+            {0: {1: '0.1'}, 1: {1: '1.1'}, 2: {0: None}},
+            ['nfs:host1(rank=0.1)', 'nfs:host3(rank=1.1)', 'nfs:host2(rank=2.0)'],
+            ['nfs:host2(rank=2.0)'],
+            []
+        ),
+        # ranked, exist, extra ranks
+        NodeAssignmentTest(
+            'nfs',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            [
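+                # reading: ranks 0 and 1 are kept at generation 5; rank 4 is
+                # not in the rank map, so nfs.4.5 is removed and a fresh
+                # rank 2 (generation 0) is scheduled on the remaining host
+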
DaemonDescription('nfs', '0.5', 'host1', rank=0, rank_generation=5), + DaemonDescription('nfs', '1.5', 'host2', rank=1, rank_generation=5), + DaemonDescription('nfs', '4.5', 'host2', rank=4, rank_generation=5), + ], + {0: {5: '0.5'}, 1: {5: '1.5'}}, + {0: {5: '0.5'}, 1: {5: '1.5'}, 2: {0: None}}, + ['nfs:host1(rank=0.5)', 'nfs:host2(rank=1.5)', 'nfs:host3(rank=2.0)'], + ['nfs:host3(rank=2.0)'], + ['nfs.4.5'] + ), + # 25: ranked, exist, extra ranks (scale down: kill off high rank) + NodeAssignmentTest( + 'nfs', + PlacementSpec(count=2), + 'host3 host2 host1'.split(), + [ + DaemonDescription('nfs', '0.5', 'host1', rank=0, rank_generation=5), + DaemonDescription('nfs', '1.5', 'host2', rank=1, rank_generation=5), + DaemonDescription('nfs', '2.5', 'host3', rank=2, rank_generation=5), + ], + {0: {5: '0.5'}, 1: {5: '1.5'}, 2: {5: '2.5'}}, + {0: {5: '0.5'}, 1: {5: '1.5'}, 2: {5: '2.5'}}, + ['nfs:host1(rank=0.5)', 'nfs:host2(rank=1.5)'], + [], + ['nfs.2.5'] + ), + # ranked, exist, extra ranks (scale down hosts) + NodeAssignmentTest( + 'nfs', + PlacementSpec(count=2), + 'host1 host3'.split(), + [ + DaemonDescription('nfs', '0.5', 'host1', rank=0, rank_generation=5), + DaemonDescription('nfs', '1.5', 'host2', rank=1, rank_generation=5), + DaemonDescription('nfs', '2.5', 'host3', rank=4, rank_generation=5), + ], + {0: {5: '0.5'}, 1: {5: '1.5'}, 2: {5: '2.5'}}, + {0: {5: '0.5'}, 1: {5: '1.5', 6: None}, 2: {5: '2.5'}}, + ['nfs:host1(rank=0.5)', 'nfs:host3(rank=1.6)'], + ['nfs:host3(rank=1.6)'], + ['nfs.2.5', 'nfs.1.5'] + ), + # ranked, exist, duplicate rank + NodeAssignmentTest( + 'nfs', + PlacementSpec(count=3), + 'host1 host2 host3'.split(), + [ + DaemonDescription('nfs', '0.0', 'host1', rank=0, rank_generation=0), + DaemonDescription('nfs', '1.1', 'host2', rank=1, rank_generation=1), + DaemonDescription('nfs', '1.2', 'host3', rank=1, rank_generation=2), + ], + {0: {0: '0.0'}, 1: {2: '1.2'}}, + {0: {0: '0.0'}, 1: {2: '1.2'}, 2: {0: None}}, + ['nfs:host1(rank=0.0)', 'nfs:host3(rank=1.2)', 'nfs:host2(rank=2.0)'], + ['nfs:host2(rank=2.0)'], + ['nfs.1.1'] + ), + # 28: ranked, all gens stale (failure during update cycle) + NodeAssignmentTest( + 'nfs', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [ + DaemonDescription('nfs', '0.2', 'host1', rank=0, rank_generation=2), + DaemonDescription('nfs', '1.2', 'host2', rank=1, rank_generation=2), + ], + {0: {2: '0.2'}, 1: {2: '1.2', 3: '1.3'}}, + {0: {2: '0.2'}, 1: {2: '1.2', 3: '1.3', 4: None}}, + ['nfs:host1(rank=0.2)', 'nfs:host3(rank=1.4)'], + ['nfs:host3(rank=1.4)'], + ['nfs.1.2'] + ), + # ranked, not enough hosts + NodeAssignmentTest( + 'nfs', + PlacementSpec(count=4), + 'host1 host2 host3'.split(), + [ + DaemonDescription('nfs', '0.2', 'host1', rank=0, rank_generation=2), + DaemonDescription('nfs', '1.2', 'host2', rank=1, rank_generation=2), + ], + {0: {2: '0.2'}, 1: {2: '1.2'}}, + {0: {2: '0.2'}, 1: {2: '1.2'}, 2: {0: None}}, + ['nfs:host1(rank=0.2)', 'nfs:host2(rank=1.2)', 'nfs:host3(rank=2.0)'], + ['nfs:host3(rank=2.0)'], + [] + ), + # ranked, scale down + NodeAssignmentTest( + 'nfs', + PlacementSpec(hosts=['host2']), + 'host1 host2'.split(), + [ + DaemonDescription('nfs', '0.2', 'host1', rank=0, rank_generation=2), + DaemonDescription('nfs', '1.2', 'host2', rank=1, rank_generation=2), + DaemonDescription('nfs', '2.2', 'host3', rank=2, rank_generation=2), + ], + {0: {2: '0.2'}, 1: {2: '1.2'}, 2: {2: '2.2'}}, + {0: {2: '0.2', 3: None}, 1: {2: '1.2'}, 2: {2: '2.2'}}, + ['nfs:host2(rank=0.3)'], + ['nfs:host2(rank=0.3)'], + ['nfs.0.2', 'nfs.1.2', 
'nfs.2.2'] + ), + + ]) +def test_node_assignment(service_type, placement, hosts, daemons, rank_map, post_rank_map, + expected, expected_add, expected_remove): + spec = None + service_id = None + allow_colo = False + if service_type == 'rgw': + service_id = 'realm.zone' + allow_colo = True + elif service_type == 'mds': + service_id = 'myfs' + allow_colo = True + elif service_type == 'nfs': + service_id = 'mynfs' + spec = ServiceSpec(service_type=service_type, + service_id=service_id, + placement=placement) + + if not spec: + spec = ServiceSpec(service_type=service_type, + service_id=service_id, + placement=placement) + + all_slots, to_add, to_remove = HostAssignment( + spec=spec, + hosts=[HostSpec(h, labels=['foo']) for h in hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + allow_colo=allow_colo, + rank_map=rank_map, + ).place() + + assert rank_map == post_rank_map + + got = [str(p) for p in all_slots] + num_wildcard = 0 + for i in expected: + if i == '*': + num_wildcard += 1 + else: + assert i in got + got.remove(i) + assert num_wildcard == len(got) + + got = [str(p) for p in to_add] + num_wildcard = 0 + for i in expected_add: + if i == '*': + num_wildcard += 1 + else: + assert i in got + got.remove(i) + assert num_wildcard == len(got) + + assert sorted([d.name() for d in to_remove]) == sorted(expected_remove) + + +class NodeAssignmentTest5(NamedTuple): + service_type: str + placement: PlacementSpec + available_hosts: List[str] + candidates_hosts: List[str] + + +@pytest.mark.parametrize("service_type, placement, available_hosts, expected_candidates", + [ # noqa: E128 + NodeAssignmentTest5( + 'alertmanager', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host3 host1 host4 host2'.split(), + ), + NodeAssignmentTest5( + 'prometheus', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host3 host2 host4 host1'.split(), + ), + NodeAssignmentTest5( + 'grafana', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host2 host4 host3'.split(), + ), + NodeAssignmentTest5( + 'mgr', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host4 host2 host1 host3'.split(), + ), + NodeAssignmentTest5( + 'mon', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host3 host4 host2'.split(), + ), + NodeAssignmentTest5( + 'rgw', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host3 host2 host4'.split(), + ), + NodeAssignmentTest5( + 'cephfs-mirror', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host4 host3 host1 host2'.split(), + ), + ]) +def test_node_assignment_random_shuffle(service_type, placement, available_hosts, expected_candidates): + spec = None + service_id = None + allow_colo = False + spec = ServiceSpec(service_type=service_type, + service_id=service_id, + placement=placement) + + candidates = HostAssignment( + spec=spec, + hosts=[HostSpec(h, labels=['foo']) for h in available_hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=[], + allow_colo=allow_colo, + ).get_candidates() + + candidates_hosts = [h.hostname for h in candidates] + assert candidates_hosts == expected_candidates + + +class NodeAssignmentTest2(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + daemons: 
List[DaemonDescription] + expected_len: int + in_set: List[str] + + +@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected_len,in_set", + [ # noqa: E128 + # just count + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [], + 1, + ['host1', 'host2', 'host3'], + ), + + # hosts + (smaller) count + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=1, hosts='host1 host2'.split()), + 'host1 host2'.split(), + [], + 1, + ['host1', 'host2'], + ), + # hosts + (smaller) count, existing + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=1, hosts='host1 host2 host3'.split()), + 'host1 host2 host3'.split(), + [DaemonDescription('mgr', 'mgr.a', 'host1')], + 1, + ['host1', 'host2', 'host3'], + ), + # hosts + (smaller) count, (more) existing + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=1, hosts='host1 host2 host3'.split()), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('mgr', 'b', 'host2'), + ], + 1, + ['host1', 'host2'] + ), + # count + partial host list + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=2, hosts=['host3']), + 'host1 host2 host3'.split(), + [], + 1, + ['host1', 'host2', 'host3'] + ), + # label + count + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=1, label='foo'), + 'host1 host2 host3'.split(), + [], + 1, + ['host1', 'host2', 'host3'] + ), + ]) +def test_node_assignment2(service_type, placement, hosts, + daemons, expected_len, in_set): + hosts, to_add, to_remove = HostAssignment( + spec=ServiceSpec(service_type, placement=placement), + hosts=[HostSpec(h, labels=['foo']) for h in hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + ).place() + assert len(hosts) == expected_len + for h in [h.hostname for h in hosts]: + assert h in in_set + + +@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected_len,must_have", + [ # noqa: E128 + # hosts + (smaller) count, (more) existing + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=3, hosts='host3'.split()), + 'host1 host2 host3'.split(), + [], + 1, + ['host3'] + ), + # count + partial host list + NodeAssignmentTest2( + 'mgr', + PlacementSpec(count=2, hosts=['host3']), + 'host1 host2 host3'.split(), + [], + 1, + ['host3'] + ), + ]) +def test_node_assignment3(service_type, placement, hosts, + daemons, expected_len, must_have): + hosts, to_add, to_remove = HostAssignment( + spec=ServiceSpec(service_type, placement=placement), + hosts=[HostSpec(h) for h in hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + ).place() + assert len(hosts) == expected_len + for h in must_have: + assert h in [h.hostname for h in hosts] + + +class NodeAssignmentTest4(NamedTuple): + spec: ServiceSpec + networks: Dict[str, Dict[str, Dict[str, List[str]]]] + daemons: List[DaemonDescription] + expected: List[str] + expected_add: List[str] + expected_remove: List[DaemonDescription] + + +@pytest.mark.parametrize("spec,networks,daemons,expected,expected_add,expected_remove", + [ # noqa: E128 + NodeAssignmentTest4( + ServiceSpec( + service_type='rgw', + service_id='foo', + placement=PlacementSpec(count=6, label='foo'), + networks=['10.0.0.0/8'], + ), + { + 'host1': {'10.0.0.0/8': {'eth0': ['10.0.0.1']}}, + 'host2': {'10.0.0.0/8': {'eth0': ['10.0.0.2']}}, + 'host3': {'192.168.0.0/16': {'eth0': ['192.168.0.1']}}, + }, + [], + ['rgw:host1(10.0.0.1:80)', 'rgw:host2(10.0.0.2:80)', + 'rgw:host1(10.0.0.1:81)', 'rgw:host2(10.0.0.2:81)', + 'rgw:host1(10.0.0.1:82)', 'rgw:host2(10.0.0.2:82)'], + 
['rgw:host1(10.0.0.1:80)', 'rgw:host2(10.0.0.2:80)', + 'rgw:host1(10.0.0.1:81)', 'rgw:host2(10.0.0.2:81)', + 'rgw:host1(10.0.0.1:82)', 'rgw:host2(10.0.0.2:82)'], + [] + ), + NodeAssignmentTest4( + IngressSpec( + service_type='ingress', + service_id='rgw.foo', + frontend_port=443, + monitor_port=8888, + virtual_ip='10.0.0.20/8', + backend_service='rgw.foo', + placement=PlacementSpec(label='foo'), + networks=['10.0.0.0/8'], + ), + { + 'host1': {'10.0.0.0/8': {'eth0': ['10.0.0.1']}}, + 'host2': {'10.0.0.0/8': {'eth1': ['10.0.0.2']}}, + 'host3': {'192.168.0.0/16': {'eth2': ['192.168.0.1']}}, + }, + [], + ['haproxy:host1(10.0.0.1:443,8888)', 'haproxy:host2(10.0.0.2:443,8888)', + 'keepalived:host1', 'keepalived:host2'], + ['haproxy:host1(10.0.0.1:443,8888)', 'haproxy:host2(10.0.0.2:443,8888)', + 'keepalived:host1', 'keepalived:host2'], + [] + ), + NodeAssignmentTest4( + IngressSpec( + service_type='ingress', + service_id='rgw.foo', + frontend_port=443, + monitor_port=8888, + virtual_ip='10.0.0.20/8', + backend_service='rgw.foo', + placement=PlacementSpec(label='foo'), + networks=['10.0.0.0/8'], + ), + { + 'host1': {'10.0.0.0/8': {'eth0': ['10.0.0.1']}}, + 'host2': {'10.0.0.0/8': {'eth1': ['10.0.0.2']}}, + 'host3': {'192.168.0.0/16': {'eth2': ['192.168.0.1']}}, + }, + [ + DaemonDescription('haproxy', 'a', 'host1', ip='10.0.0.1', + ports=[443, 8888]), + DaemonDescription('keepalived', 'b', 'host2'), + DaemonDescription('keepalived', 'c', 'host3'), + ], + ['haproxy:host1(10.0.0.1:443,8888)', 'haproxy:host2(10.0.0.2:443,8888)', + 'keepalived:host1', 'keepalived:host2'], + ['haproxy:host2(10.0.0.2:443,8888)', + 'keepalived:host1'], + ['keepalived.c'] + ), + ]) +def test_node_assignment4(spec, networks, daemons, + expected, expected_add, expected_remove): + all_slots, to_add, to_remove = HostAssignment( + spec=spec, + hosts=[HostSpec(h, labels=['foo']) for h in networks.keys()], + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + allow_colo=True, + networks=networks, + primary_daemon_type='haproxy' if spec.service_type == 'ingress' else spec.service_type, + per_host_daemon_type='keepalived' if spec.service_type == 'ingress' else None, + ).place() + + got = [str(p) for p in all_slots] + num_wildcard = 0 + for i in expected: + if i == '*': + num_wildcard += 1 + else: + assert i in got + got.remove(i) + assert num_wildcard == len(got) + + got = [str(p) for p in to_add] + num_wildcard = 0 + for i in expected_add: + if i == '*': + num_wildcard += 1 + else: + assert i in got + got.remove(i) + assert num_wildcard == len(got) + + assert sorted([d.name() for d in to_remove]) == sorted(expected_remove) + + +@pytest.mark.parametrize("placement", + [ # noqa: E128 + ('1 *'), + ('* label:foo'), + ('* host1 host2'), + ('hostname12hostname12hostname12hostname12hostname12hostname12hostname12'), # > 63 chars + ]) +def test_bad_placements(placement): + try: + PlacementSpec.from_string(placement.split(' ')) + assert False + except SpecValidationError: + pass + + +class NodeAssignmentTestBadSpec(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + daemons: List[DaemonDescription] + expected: str + + +@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected", + [ # noqa: E128 + # unknown host + NodeAssignmentTestBadSpec( + 'mgr', + PlacementSpec(hosts=['unknownhost']), + ['knownhost'], + [], + "Cannot place on unknownhost: Unknown hosts" + ), + # unknown host pattern + NodeAssignmentTestBadSpec( + 'mgr', + PlacementSpec(host_pattern='unknownhost'), + ['knownhost'], + 
[], + "Cannot place : No matching hosts" + ), + # unknown label + NodeAssignmentTestBadSpec( + 'mgr', + PlacementSpec(label='unknownlabel'), + [], + [], + "Cannot place : No matching hosts for label unknownlabel" + ), + ]) +def test_bad_specs(service_type, placement, hosts, daemons, expected): + with pytest.raises(OrchestratorValidationError) as e: + hosts, to_add, to_remove = HostAssignment( + spec=ServiceSpec(service_type, placement=placement), + hosts=[HostSpec(h) for h in hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=daemons, + ).place() + assert str(e.value) == expected + + +class ActiveAssignmentTest(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + daemons: List[DaemonDescription] + expected: List[List[str]] + expected_add: List[List[str]] + expected_remove: List[List[str]] + + +@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected,expected_add,expected_remove", + [ + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1', is_active=True), + DaemonDescription('mgr', 'b', 'host2'), + DaemonDescription('mgr', 'c', 'host3'), + ], + [['host1', 'host2'], ['host1', 'host3']], + [[]], + [['mgr.b'], ['mgr.c']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('mgr', 'b', 'host2'), + DaemonDescription('mgr', 'c', 'host3', is_active=True), + ], + [['host1', 'host3'], ['host2', 'host3']], + [[]], + [['mgr.a'], ['mgr.b']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('mgr', 'b', 'host2', is_active=True), + DaemonDescription('mgr', 'c', 'host3'), + ], + [['host2']], + [[]], + [['mgr.a', 'mgr.c']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('mgr', 'b', 'host2'), + DaemonDescription('mgr', 'c', 'host3', is_active=True), + ], + [['host3']], + [[]], + [['mgr.a', 'mgr.b']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1', is_active=True), + DaemonDescription('mgr', 'b', 'host2'), + DaemonDescription('mgr', 'c', 'host3', is_active=True), + ], + [['host1'], ['host3']], + [[]], + [['mgr.a', 'mgr.b'], ['mgr.b', 'mgr.c']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1'), + DaemonDescription('mgr', 'b', 'host2', is_active=True), + DaemonDescription('mgr', 'c', 'host3', is_active=True), + ], + [['host2', 'host3']], + [[]], + [['mgr.a']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1', is_active=True), + DaemonDescription('mgr', 'b', 'host2', is_active=True), + DaemonDescription('mgr', 'c', 'host3', is_active=True), + ], + [['host1'], ['host2'], ['host3']], + [[]], + [['mgr.a', 'mgr.b'], ['mgr.b', 'mgr.c'], ['mgr.a', 'mgr.c']] + ), + ActiveAssignmentTest( + 'mgr', + PlacementSpec(count=1), + 'host1 host2 host3'.split(), + [ + DaemonDescription('mgr', 'a', 'host1', is_active=True), + DaemonDescription('mgr', 'a2', 'host1'), + DaemonDescription('mgr', 'b', 'host2'), + DaemonDescription('mgr', 'c', 'host3'), + ], + [['host1']], + [[]], + [['mgr.a2', 'mgr.b', 'mgr.c']] 
+        ),
+        ActiveAssignmentTest(
+            'mgr',
+            PlacementSpec(count=1),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1', is_active=True),
+                DaemonDescription('mgr', 'a2', 'host1', is_active=True),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3'),
+            ],
+            [['host1']],
+            [[]],
+            [['mgr.a', 'mgr.b', 'mgr.c'], ['mgr.a2', 'mgr.b', 'mgr.c']]
+        ),
+        ActiveAssignmentTest(
+            'mgr',
+            PlacementSpec(count=2),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1', is_active=True),
+                DaemonDescription('mgr', 'a2', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3', is_active=True),
+            ],
+            [['host1', 'host3']],
+            [[]],
+            [['mgr.a2', 'mgr.b']]
+        ),
+        # Explicit placement should override preference for active daemon
+        ActiveAssignmentTest(
+            'mgr',
+            PlacementSpec(count=1, hosts=['host1']),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3', is_active=True),
+            ],
+            [['host1']],
+            [[]],
+            [['mgr.b', 'mgr.c']]
+        ),
+
+    ])
+def test_active_assignment(service_type, placement, hosts, daemons, expected, expected_add, expected_remove):
+
+    spec = ServiceSpec(service_type=service_type,
+                       service_id=None,
+                       placement=placement)
+
+    hosts, to_add, to_remove = HostAssignment(
+        spec=spec,
+        hosts=[HostSpec(h) for h in hosts],
+        unreachable_hosts=[],
+        draining_hosts=[],
+        daemons=daemons,
+    ).place()
+    assert sorted([h.hostname for h in hosts]) in expected
+    assert sorted([h.hostname for h in to_add]) in expected_add
+    assert sorted([h.name() for h in to_remove]) in expected_remove
+
+
+class UnreachableHostsTest(NamedTuple):
+    service_type: str
+    placement: PlacementSpec
+    hosts: List[str]
+    unreachable_hosts: List[str]
+    daemons: List[DaemonDescription]
+    expected_add: List[List[str]]
+    expected_remove: List[List[str]]
+
+
+@pytest.mark.parametrize("service_type,placement,hosts,unreachable_hosts,daemons,expected_add,expected_remove",
+    [
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(count=3),
+            'host1 host2 host3'.split(),
+            ['host2'],
+            [],
+            [['host1', 'host3']],
+            [[]],
+        ),
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(hosts=['host3']),
+            'host1 host2 host3'.split(),
+            ['host1'],
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3', is_active=True),
+            ],
+            [[]],
+            [['mgr.b']],
+        ),
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(count=3),
+            'host1 host2 host3 host4'.split(),
+            ['host1'],
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3', is_active=True),
+            ],
+            [[]],
+            [[]],
+        ),
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(count=1),
+            'host1 host2 host3 host4'.split(),
+            'host1 host3'.split(),
+            [
+                DaemonDescription('mgr', 'a', 'host1'),
+                DaemonDescription('mgr', 'b', 'host2'),
+                DaemonDescription('mgr', 'c', 'host3', is_active=True),
+            ],
+            [[]],
+            [['mgr.b']],
+        ),
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(count=3),
+            'host1 host2 host3 host4'.split(),
+            ['host2'],
+            [],
+            [['host1', 'host3', 'host4']],
+            [[]],
+        ),
+        UnreachableHostsTest(
+            'mgr',
+            PlacementSpec(count=3),
+            'host1 host2 host3 host4'.split(),
+            'host1 host4'.split(),
+            [],
+            [['host2', 'host3']],
+            [[]],
+        ),
+
+    ])
+def test_unreachable_host(service_type, placement, hosts, unreachable_hosts, daemons, expected_add, expected_remove):
+
+    spec =
ServiceSpec(service_type=service_type, + service_id=None, + placement=placement) + + hosts, to_add, to_remove = HostAssignment( + spec=spec, + hosts=[HostSpec(h) for h in hosts], + unreachable_hosts=[HostSpec(h) for h in unreachable_hosts], + draining_hosts=[], + daemons=daemons, + ).place() + assert sorted([h.hostname for h in to_add]) in expected_add + assert sorted([h.name() for h in to_remove]) in expected_remove + + +class RescheduleFromOfflineTest(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + maintenance_hosts: List[str] + offline_hosts: List[str] + daemons: List[DaemonDescription] + expected_add: List[List[str]] + expected_remove: List[List[str]] + + +@pytest.mark.parametrize("service_type,placement,hosts,maintenance_hosts,offline_hosts,daemons,expected_add,expected_remove", + [ + RescheduleFromOfflineTest( + 'nfs', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [], + ['host2'], + [ + DaemonDescription('nfs', 'a', 'host1'), + DaemonDescription('nfs', 'b', 'host2'), + ], + [['host3']], + [[]], + ), + RescheduleFromOfflineTest( + 'nfs', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + ['host2'], + [], + [ + DaemonDescription('nfs', 'a', 'host1'), + DaemonDescription('nfs', 'b', 'host2'), + ], + [[]], + [[]], + ), + RescheduleFromOfflineTest( + 'mon', + PlacementSpec(count=2), + 'host1 host2 host3'.split(), + [], + ['host2'], + [ + DaemonDescription('mon', 'a', 'host1'), + DaemonDescription('mon', 'b', 'host2'), + ], + [[]], + [[]], + ), + RescheduleFromOfflineTest( + 'ingress', + PlacementSpec(count=1), + 'host1 host2'.split(), + [], + ['host2'], + [ + DaemonDescription('haproxy', 'b', 'host2'), + DaemonDescription('keepalived', 'b', 'host2'), + ], + [['host1']], + [[]], + ), + ]) +def test_remove_from_offline(service_type, placement, hosts, maintenance_hosts, offline_hosts, daemons, expected_add, expected_remove): + + if service_type == 'ingress': + spec = \ + IngressSpec( + service_type='ingress', + service_id='nfs-ha.foo', + frontend_port=443, + monitor_port=8888, + virtual_ip='10.0.0.20/8', + backend_service='nfs-ha.foo', + placement=placement, + ) + else: + spec = \ + ServiceSpec( + service_type=service_type, + service_id='test', + placement=placement, + ) + + host_specs = [HostSpec(h) for h in hosts] + for h in host_specs: + if h.hostname in offline_hosts: + h.status = 'offline' + if h.hostname in maintenance_hosts: + h.status = 'maintenance' + + hosts, to_add, to_remove = HostAssignment( + spec=spec, + hosts=host_specs, + unreachable_hosts=[h for h in host_specs if h.status], + draining_hosts=[], + daemons=daemons, + ).place() + assert sorted([h.hostname for h in to_add]) in expected_add + assert sorted([h.name() for h in to_remove]) in expected_remove + + +class DrainExplicitPlacementTest(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + maintenance_hosts: List[str] + offline_hosts: List[str] + draining_hosts: List[str] + daemons: List[DaemonDescription] + expected_add: List[List[str]] + expected_remove: List[List[str]] + + +@pytest.mark.parametrize("service_type,placement,hosts,maintenance_hosts,offline_hosts,draining_hosts,daemons,expected_add,expected_remove", + [ + DrainExplicitPlacementTest( + 'crash', + PlacementSpec(hosts='host1 host2 host3'.split()), + 'host1 host2 host3 host4'.split(), + [], + [], + ['host3'], + [ + DaemonDescription('crash', 'host1', 'host1'), + DaemonDescription('crash', 'host2', 'host2'), + DaemonDescription('crash', 'host3', 'host3'), + ], + [[]], + 
[['crash.host3']],
+        ),
+        DrainExplicitPlacementTest(
+            'crash',
+            PlacementSpec(hosts='host1 host2 host3 host4'.split()),
+            'host1 host2 host3 host4'.split(),
+            [],
+            [],
+            ['host1', 'host4'],
+            [
+                DaemonDescription('crash', 'host1', 'host1'),
+                DaemonDescription('crash', 'host3', 'host3'),
+            ],
+            [['host2']],
+            [['crash.host1']],
+        ),
+    ])
+def test_drain_from_explicit_placement(service_type, placement, hosts, maintenance_hosts, offline_hosts, draining_hosts, daemons, expected_add, expected_remove):
+
+    spec = ServiceSpec(service_type=service_type,
+                       service_id='test',
+                       placement=placement)
+
+    host_specs = [HostSpec(h) for h in hosts]
+    draining_host_specs = [HostSpec(h) for h in draining_hosts]
+    for h in host_specs:
+        if h.hostname in offline_hosts:
+            h.status = 'offline'
+        if h.hostname in maintenance_hosts:
+            h.status = 'maintenance'
+
+    hosts, to_add, to_remove = HostAssignment(
+        spec=spec,
+        hosts=host_specs,
+        unreachable_hosts=[h for h in host_specs if h.status],
+        draining_hosts=draining_host_specs,
+        daemons=daemons,
+    ).place()
+    assert sorted([h.hostname for h in to_add]) in expected_add
+    assert sorted([h.name() for h in to_remove]) in expected_remove
diff --git a/src/pybind/mgr/cephadm/tests/test_service_discovery.py b/src/pybind/mgr/cephadm/tests/test_service_discovery.py
new file mode 100644
index 000000000..ff98a1388
--- /dev/null
+++ b/src/pybind/mgr/cephadm/tests/test_service_discovery.py
@@ -0,0 +1,178 @@
+from unittest.mock import MagicMock
+from cephadm.service_discovery import Root
+
+
+class FakeDaemonDescription:
+    def __init__(self, ip, ports, hostname, service_name='', daemon_type=''):
+        self.ip = ip
+        self.ports = ports
+        self.hostname = hostname
+        self._service_name = service_name
+        self.daemon_type = daemon_type
+
+    def service_name(self):
+        return self._service_name
+
+
+class FakeCache:
+    def get_daemons_by_service(self, service_type):
+        if service_type == 'ceph-exporter':
+            return [FakeDaemonDescription('1.2.3.4', [9926], 'node0'),
+                    FakeDaemonDescription('1.2.3.5', [9926], 'node1')]
+
+        return [FakeDaemonDescription('1.2.3.4', [9100], 'node0'),
+                FakeDaemonDescription('1.2.3.5', [9200], 'node1')]
+
+    def get_daemons_by_type(self, daemon_type):
+        return [FakeDaemonDescription('1.2.3.4', [9100], 'node0', 'ingress', 'haproxy'),
+                FakeDaemonDescription('1.2.3.5', [9200], 'node1', 'ingress', 'haproxy')]
+
+
+class FakeInventory:
+    def get_addr(self, name: str):
+        return '1.2.3.4'
+
+
+class FakeServiceSpec:
+    def __init__(self, port):
+        self.monitor_port = port
+
+
+class FakeSpecDescription:
+    def __init__(self, port):
+        self.spec = FakeServiceSpec(port)
+
+
+class FakeSpecStore():
+    def __init__(self, mgr):
+        self.mgr = mgr
+        self._specs = {'ingress': FakeSpecDescription(9049)}
+
+    def __contains__(self, name):
+        return name in self._specs
+
+    def __getitem__(self, name):
+        return self._specs['ingress']
+
+
+class FakeMgr:
+    def __init__(self):
+        self.config = ''
+        self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
+        self.mon_command = MagicMock(side_effect=self._check_mon_command)
+        self.template = MagicMock()
+        self.log = MagicMock()
+        self.inventory = FakeInventory()
+        self.cache = FakeCache()
+        self.spec_store = FakeSpecStore(self)
+
+    def get_mgr_id(self):
+        return 'mgr-1'
+
+    def list_servers(self):
+
+        servers = [
+            {'hostname': 'node0',
+             'ceph_version': '16.2',
+             'services': [{'type': 'mgr', 'id': 'mgr-1'}, {'type': 'mon'}]},
+            {'hostname': 'node1',
+             'ceph_version': '16.2',
+             'services': [{'type': 'mgr', 'id': 'mgr-2'},
{'type': 'mon'}]} + ] + + return servers + + def _check_mon_command(self, cmd_dict, inbuf=None): + prefix = cmd_dict.get('prefix') + if prefix == 'get-cmd': + return 0, self.config, '' + if prefix == 'set-cmd': + self.config = cmd_dict.get('value') + return 0, 'value set', '' + return -1, '', 'error' + + def get_module_option_ex(self, module, option, default_value): + return "9283" + + +class TestServiceDiscovery: + + def test_get_sd_config_prometheus(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('mgr-prometheus') + + # check response structure + assert cfg + for entry in cfg: + assert 'labels' in entry + assert 'targets' in entry + + # check content + assert cfg[0]['targets'] == ['node0:9283'] + + def test_get_sd_config_node_exporter(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('node-exporter') + + # check response structure + assert cfg + for entry in cfg: + assert 'labels' in entry + assert 'targets' in entry + + # check content + assert cfg[0]['targets'] == ['1.2.3.4:9100'] + assert cfg[0]['labels'] == {'instance': 'node0'} + assert cfg[1]['targets'] == ['1.2.3.5:9200'] + assert cfg[1]['labels'] == {'instance': 'node1'} + + def test_get_sd_config_alertmgr(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('alertmanager') + + # check response structure + assert cfg + for entry in cfg: + assert 'labels' in entry + assert 'targets' in entry + + # check content + assert cfg[0]['targets'] == ['1.2.3.4:9100', '1.2.3.5:9200'] + + def test_get_sd_config_haproxy(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('haproxy') + + # check response structure + assert cfg + for entry in cfg: + assert 'labels' in entry + assert 'targets' in entry + + # check content + assert cfg[0]['targets'] == ['1.2.3.4:9049'] + assert cfg[0]['labels'] == {'instance': 'ingress'} + + def test_get_sd_config_ceph_exporter(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('ceph-exporter') + + # check response structure + assert cfg + for entry in cfg: + assert 'labels' in entry + assert 'targets' in entry + + # check content + assert cfg[0]['targets'] == ['1.2.3.4:9926'] + + def test_get_sd_config_invalid_service(self): + mgr = FakeMgr() + root = Root(mgr, 5000, '0.0.0.0') + cfg = root.get_sd_config('invalid-service') + assert cfg == [] diff --git a/src/pybind/mgr/cephadm/tests/test_services.py b/src/pybind/mgr/cephadm/tests/test_services.py new file mode 100644 index 000000000..2300b288d --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_services.py @@ -0,0 +1,2725 @@ +from textwrap import dedent +import json +import urllib.parse +import yaml +from mgr_util import build_url + +import pytest + +from unittest.mock import MagicMock, call, patch, ANY + +from cephadm.serve import CephadmServe +from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \ + RbdMirrorService, CrashService, CephadmDaemonDeploySpec +from cephadm.services.iscsi import IscsiService +from cephadm.services.nfs import NFSService +from cephadm.services.nvmeof import NvmeofService +from cephadm.services.osd import OSDService +from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \ + NodeExporterService, LokiService, PromtailService +from cephadm.module import CephadmOrchestrator +from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \ + 
ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec, TracingSpec, \ + PrometheusSpec, CephExporterSpec, NFSServiceSpec, NvmeofServiceSpec +from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect + +from ceph.utils import datetime_now + +from orchestrator import OrchestratorError +from orchestrator._interface import DaemonDescription + +from typing import Dict, List + +grafana_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQDIZSujNBlKaLJzmvntjukjMA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzEzMTE0NzA3WhcN\nMzIwNzEwMTE0NzA3WjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyMe4DMA+MeYK7BHZMHB\nq7zjliEOcNgxomjU8qbf5USF7Mqrf6+/87XWqj4pCyAW8x0WXEr6A56a+cmBVmt+\nqtWDzl020aoId6lL5EgLLn6/kMDCCJLq++Lg9cEofMSvcZh+lY2f+1p+C+00xent\nrLXvXGOilAZWaQfojT2BpRnNWWIFbpFwlcKrlg2G0cFjV5c1m6a0wpsQ9JHOieq0\nSvwCixajwq3CwAYuuiU1wjI4oJO4Io1+g8yB3nH2Mo/25SApCxMXuXh4kHLQr/T4\n4hqisvG4uJYgKMcSIrWj5o25mclByGi1UI/kZkCUES94i7Z/3ihx4Bad0AMs/9tw\nFwIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQAf+pwz7Gd7mDwU2LY0TQXsK6/8KGzh\nHuX+ErOb8h5cOAbvCnHjyJFWf6gCITG98k9nxU9NToG0WYuNm/max1y/54f0dtxZ\npUo6KSNl3w6iYCfGOeUIj8isi06xMmeTgMNzv8DYhDt+P2igN6LenqWTVztogkiV\nxQ5ZJFFLEw4sN0CXnrZX3t5ruakxLXLTLKeE0I91YJvjClSBGkVJq26wOKQNHMhx\npWxeydQ5EgPZY+Aviz5Dnxe8aB7oSSovpXByzxURSabOuCK21awW5WJCGNpmqhWK\nZzACBDEstccj57c4OGV0eayHJRsluVr2e9NHRINZA3qdB37e6gsI1xHo\n-----END CERTIFICATE-----\n""" + +grafana_key = """-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLIx7gMwD4x5gr\nsEdkwcGrvOOWIQ5w2DGiaNTypt/lRIXsyqt/r7/ztdaqPikLIBbzHRZcSvoDnpr5\nyYFWa36q1YPOXTbRqgh3qUvkSAsufr+QwMIIkur74uD1wSh8xK9xmH6VjZ/7Wn4L\n7TTF6e2ste9cY6KUBlZpB+iNPYGlGc1ZYgVukXCVwquWDYbRwWNXlzWbprTCmxD0\nkc6J6rRK/AKLFqPCrcLABi66JTXCMjigk7gijX6DzIHecfYyj/blICkLExe5eHiQ\nctCv9PjiGqKy8bi4liAoxxIitaPmjbmZyUHIaLVQj+RmQJQRL3iLtn/eKHHgFp3Q\nAyz/23AXAgMBAAECggEAVoTB3Mm8azlPlaQB9GcV3tiXslSn+uYJ1duCf0sV52dV\nBzKW8s5fGiTjpiTNhGCJhchowqxoaew+o47wmGc2TvqbpeRLuecKrjScD0GkCYyQ\neM2wlshEbz4FhIZdgS6gbuh9WaM1dW/oaZoBNR5aTYo7xYTmNNeyLA/jO2zr7+4W\n5yES1lMSBXpKk7bDGKYY4bsX2b5RLr2Grh2u2bp7hoLABCEvuu8tSQdWXLEXWpXo\njwmV3hc6tabypIa0mj2Dmn2Dmt1ppSO0AZWG/WAizN3f4Z0r/u9HnbVrVmh0IEDw\n3uf2LP5o3msG9qKCbzv3lMgt9mMr70HOKnJ8ohMSKQKBgQDLkNb+0nr152HU9AeJ\nvdz8BeMxcwxCG77iwZphZ1HprmYKvvXgedqWtS6FRU+nV6UuQoPUbQxJBQzrN1Qv\nwKSlOAPCrTJgNgF/RbfxZTrIgCPuK2KM8I89VZv92TSGi362oQA4MazXC8RAWjoJ\nSu1/PHzK3aXOfVNSLrOWvIYeZQKBgQD/dgT6RUXKg0UhmXj7ExevV+c7oOJTDlMl\nvLngrmbjRgPO9VxLnZQGdyaBJeRngU/UXfNgajT/MU8B5fSKInnTMawv/tW7634B\nw3v6n5kNIMIjJmENRsXBVMllDTkT9S7ApV+VoGnXRccbTiDapBThSGd0wri/CuwK\nNWK1YFOeywKBgEDyI/XG114PBUJ43NLQVWm+wx5qszWAPqV/2S5MVXD1qC6zgCSv\nG9NLWN1CIMimCNg6dm7Wn73IM7fzvhNCJgVkWqbItTLG6DFf3/DPODLx1wTMqLOI\nqFqMLqmNm9l1Nec0dKp5BsjRQzq4zp1aX21hsfrTPmwjxeqJZdioqy2VAoGAXR5X\nCCdSHlSlUW8RE2xNOOQw7KJjfWT+WAYoN0c7R+MQplL31rRU7dpm1bLLRBN11vJ8\nMYvlT5RYuVdqQSP6BkrX+hLJNBvOLbRlL+EXOBrVyVxHCkDe+u7+DnC4epbn+N8P\nLYpwqkDMKB7diPVAizIKTBxinXjMu5fkKDs5n+sCgYBbZheYKk5M0sIxiDfZuXGB\nkf4mJdEkTI1KUGRdCwO/O7hXbroGoUVJTwqBLi1tKqLLarwCITje2T200BYOzj82\nqwRkCXGtXPKnxYEEUOiFx9OeDrzsZV00cxsEnX0Zdj+PucQ/J3Cvd0dWUspJfLHJ\n39gnaegswnz9KMQAvzKFdg==\n-----END PRIVATE KEY-----\n""" + + +class FakeInventory: + def get_addr(self, name: str) -> str: + return '1.2.3.4' + + +class FakeMgr: + def __init__(self): + self.config = '' + self.set_mon_crush_locations: Dict[str, List[str]] = {} + self.check_mon_command = MagicMock(side_effect=self._check_mon_command) + self.mon_command = 
MagicMock(side_effect=self._check_mon_command) + self.template = MagicMock() + self.log = MagicMock() + self.inventory = FakeInventory() + + def _check_mon_command(self, cmd_dict, inbuf=None): + prefix = cmd_dict.get('prefix') + if prefix == 'get-cmd': + return 0, self.config, '' + if prefix == 'set-cmd': + self.config = cmd_dict.get('value') + return 0, 'value set', '' + if prefix in ['auth get']: + return 0, '[foo]\nkeyring = asdf\n', '' + if prefix == 'quorum_status': + # actual quorum status output from testing + # note in this output all of the mons have blank crush locations + return 0, """{"election_epoch": 14, "quorum": [0, 1, 2], "quorum_names": ["vm-00", "vm-01", "vm-02"], "quorum_leader_name": "vm-00", "quorum_age": 101, "features": {"quorum_con": "4540138322906710015", "quorum_mon": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"]}, "monmap": {"epoch": 3, "fsid": "9863e1b8-6f24-11ed-8ad8-525400c13ad2", "modified": "2022-11-28T14:00:29.972488Z", "created": "2022-11-28T13:57:55.847497Z", "min_mon_release": 18, "min_mon_release_name": "reef", "election_strategy": 1, "disallowed_leaders: ": "", "stretch_mode": false, "tiebreaker_mon": "", "features": {"persistent": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"], "optional": []}, "mons": [{"rank": 0, "name": "vm-00", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.61:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.61:6789", "nonce": 0}]}, "addr": "192.168.122.61:6789/0", "public_addr": "192.168.122.61:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 1, "name": "vm-01", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.63:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.63:6789", "nonce": 0}]}, "addr": "192.168.122.63:6789/0", "public_addr": "192.168.122.63:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 2, "name": "vm-02", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.82:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.82:6789", "nonce": 0}]}, "addr": "192.168.122.82:6789/0", "public_addr": "192.168.122.82:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}]}}""", '' + if prefix == 'mon set_location': + self.set_mon_crush_locations[cmd_dict.get('name')] = cmd_dict.get('args') + return 0, '', '' + return -1, '', 'error' + + def get_minimal_ceph_conf(self) -> str: + return '' + + def get_mgr_ip(self) -> str: + return '1.2.3.4' + + +class TestCephadmService: + def test_set_service_url_on_dashboard(self): + # pylint: disable=protected-access + mgr = FakeMgr() + service_url = 'http://svc:1000' + service = GrafanaService(mgr) + service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url) + assert mgr.config == service_url + + # set-cmd should not be called if value doesn't change + mgr.check_mon_command.reset_mock() + service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url) + mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'}) + + def _get_services(self, mgr): + # services: + osd_service = OSDService(mgr) + nfs_service = NFSService(mgr) + mon_service = MonService(mgr) + mgr_service = MgrService(mgr) + mds_service = MdsService(mgr) + rgw_service = RgwService(mgr) + rbd_mirror_service = RbdMirrorService(mgr) + grafana_service = GrafanaService(mgr) + alertmanager_service = AlertmanagerService(mgr) + prometheus_service = 
PrometheusService(mgr) + node_exporter_service = NodeExporterService(mgr) + loki_service = LokiService(mgr) + promtail_service = PromtailService(mgr) + crash_service = CrashService(mgr) + iscsi_service = IscsiService(mgr) + nvmeof_service = NvmeofService(mgr) + cephadm_services = { + 'mon': mon_service, + 'mgr': mgr_service, + 'osd': osd_service, + 'mds': mds_service, + 'rgw': rgw_service, + 'rbd-mirror': rbd_mirror_service, + 'nfs': nfs_service, + 'grafana': grafana_service, + 'alertmanager': alertmanager_service, + 'prometheus': prometheus_service, + 'node-exporter': node_exporter_service, + 'loki': loki_service, + 'promtail': promtail_service, + 'crash': crash_service, + 'iscsi': iscsi_service, + 'nvmeof': nvmeof_service, + } + return cephadm_services + + def test_get_auth_entity(self): + mgr = FakeMgr() + cephadm_services = self._get_services(mgr) + + for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]: + assert "client.%s.id1" % (daemon_type) == \ + cephadm_services[daemon_type].get_auth_entity("id1", "host") + assert "client.%s.id1" % (daemon_type) == \ + cephadm_services[daemon_type].get_auth_entity("id1", "") + assert "client.%s.id1" % (daemon_type) == \ + cephadm_services[daemon_type].get_auth_entity("id1") + + assert "client.crash.host" == \ + cephadm_services["crash"].get_auth_entity("id1", "host") + with pytest.raises(OrchestratorError): + cephadm_services["crash"].get_auth_entity("id1", "") + cephadm_services["crash"].get_auth_entity("id1") + + assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host") + assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "") + assert "mon." == cephadm_services["mon"].get_auth_entity("id1") + + assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host") + assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "") + assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1") + + for daemon_type in ["osd", "mds"]: + assert "%s.id1" % daemon_type == \ + cephadm_services[daemon_type].get_auth_entity("id1", "host") + assert "%s.id1" % daemon_type == \ + cephadm_services[daemon_type].get_auth_entity("id1", "") + assert "%s.id1" % daemon_type == \ + cephadm_services[daemon_type].get_auth_entity("id1") + + # services based on CephadmService shouldn't have get_auth_entity + with pytest.raises(AttributeError): + for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']: + cephadm_services[daemon_type].get_auth_entity("id1", "host") + cephadm_services[daemon_type].get_auth_entity("id1", "") + cephadm_services[daemon_type].get_auth_entity("id1") + + +class TestISCSIService: + + mgr = FakeMgr() + iscsi_service = IscsiService(mgr) + + iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a") + iscsi_spec.daemon_type = "iscsi" + iscsi_spec.daemon_id = "a" + iscsi_spec.spec = MagicMock() + iscsi_spec.spec.daemon_type = "iscsi" + iscsi_spec.spec.ssl_cert = '' + iscsi_spec.api_user = "user" + iscsi_spec.api_password = "password" + iscsi_spec.api_port = 5000 + iscsi_spec.api_secure = False + iscsi_spec.ssl_cert = "cert" + iscsi_spec.ssl_key = "key" + + mgr.spec_store = MagicMock() + mgr.spec_store.all_specs.get.return_value = iscsi_spec + + def test_iscsi_client_caps(self): + + iscsi_daemon_spec = CephadmDaemonDeploySpec( + host='host', daemon_id='a', service_name=self.iscsi_spec.service_name()) + + self.iscsi_service.prepare_create(iscsi_daemon_spec) + + expected_caps = ['mon', + 'profile rbd, allow command "osd blocklist", allow command "config-key 
get" with "key" prefix "iscsi/"', + 'mgr', 'allow command "service status"', + 'osd', 'allow rwx'] + + expected_call = call({'prefix': 'auth get-or-create', + 'entity': 'client.iscsi.a', + 'caps': expected_caps}) + expected_call2 = call({'prefix': 'auth caps', + 'entity': 'client.iscsi.a', + 'caps': expected_caps}) + expected_call3 = call({'prefix': 'auth get', + 'entity': 'client.iscsi.a'}) + + assert expected_call in self.mgr.mon_command.mock_calls + assert expected_call2 in self.mgr.mon_command.mock_calls + assert expected_call3 in self.mgr.mon_command.mock_calls + + @patch('cephadm.utils.resolve_ip') + def test_iscsi_dashboard_config(self, mock_resolve_ip): + + self.mgr.check_mon_command = MagicMock() + self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '') + + # Case 1: use IPV4 address + id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1", + daemon_id="a", ip='192.168.1.1') + daemon_list = [id1] + mock_resolve_ip.return_value = '192.168.1.1' + + self.iscsi_service.config_dashboard(daemon_list) + + dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add', + 'name': 'testhost1'}, + 'http://user:password@192.168.1.1:5000') + + assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls + + # Case 2: use IPV6 address + self.mgr.check_mon_command.reset_mock() + + id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1", + daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210') + mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210' + + self.iscsi_service.config_dashboard(daemon_list) + + dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add', + 'name': 'testhost1'}, + 'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000') + + assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls + + # Case 3: IPV6 Address . Secure protocol + self.mgr.check_mon_command.reset_mock() + + self.iscsi_spec.api_secure = True + + self.iscsi_service.config_dashboard(daemon_list) + + dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add', + 'name': 'testhost1'}, + 'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000') + + assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_unique_name") + @patch("cephadm.services.iscsi.IscsiService.get_trusted_ips") + def test_iscsi_config(self, _get_trusted_ips, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator): + + iscsi_daemon_id = 'testpool.test.qwert' + trusted_ips = '1.1.1.1,2.2.2.2' + api_port = 3456 + api_user = 'test-user' + api_password = 'test-password' + pool = 'testpool' + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + _get_name.return_value = iscsi_daemon_id + _get_trusted_ips.return_value = trusted_ips + + iscsi_gateway_conf = f"""# This file is generated by cephadm. 
+[config] +cluster_client_name = client.iscsi.{iscsi_daemon_id} +pool = {pool} +trusted_ip_list = {trusted_ips} +minimum_gateways = 1 +api_port = {api_port} +api_user = {api_user} +api_password = {api_password} +api_secure = False +log_to_stderr = True +log_to_stderr_prefix = debug +log_to_file = False""" + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, IscsiServiceSpec(service_id=pool, + api_port=api_port, + api_user=api_user, + api_password=api_password, + pool=pool, + trusted_ip_list=trusted_ips)): + _run_cephadm.assert_called_with( + 'test', + f'iscsi.{iscsi_daemon_id}', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": f'iscsi.{iscsi_daemon_id}', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [api_port], + }, + "meta": { + 'service_name': f'iscsi.{pool}', + 'ports': [api_port], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "", + "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", + "files": { + "iscsi-gateway.cfg": iscsi_gateway_conf, + }, + } + }), + ) + + +class TestNVMEOFService: + + mgr = FakeMgr() + nvmeof_service = NvmeofService(mgr) + + nvmeof_spec = NvmeofServiceSpec(service_type='nvmeof', service_id="a") + nvmeof_spec.daemon_type = 'nvmeof' + nvmeof_spec.daemon_id = "a" + nvmeof_spec.spec = MagicMock() + nvmeof_spec.spec.daemon_type = 'nvmeof' + + mgr.spec_store = MagicMock() + mgr.spec_store.all_specs.get.return_value = nvmeof_spec + + def test_nvmeof_client_caps(self): + pass + + @patch('cephadm.utils.resolve_ip') + def test_nvmeof_dashboard_config(self, mock_resolve_ip): + pass + + @patch("cephadm.inventory.Inventory.get_addr", lambda _, __: '192.168.100.100') + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_unique_name") + def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator): + + nvmeof_daemon_id = 'testpool.test.qwert' + pool = 'testpool' + tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288' + default_port = 5500 + group = 'mygroup' + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + _get_name.return_value = nvmeof_daemon_id + + nvmeof_gateway_conf = f"""# This file is generated by cephadm. 
+[gateway] +name = client.nvmeof.{nvmeof_daemon_id} +group = {group} +addr = 192.168.100.100 +port = {default_port} +enable_auth = False +state_update_notify = True +state_update_interval_sec = 5 + +[ceph] +pool = {pool} +config_file = /etc/ceph/ceph.conf +id = nvmeof.{nvmeof_daemon_id} + +[mtls] +server_key = ./server.key +client_key = ./client.key +server_cert = ./server.crt +client_cert = ./client.crt + +[spdk] +tgt_path = /usr/local/bin/nvmf_tgt +rpc_socket = /var/tmp/spdk.sock +timeout = 60 +log_level = WARN +conn_retries = 10 +transports = tcp +transport_tcp_options = {{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7}} +tgt_cmd_extra_args = {tgt_cmd_extra_args}\n""" + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool, + tgt_cmd_extra_args=tgt_cmd_extra_args, + group=group, + pool=pool)): + _run_cephadm.assert_called_with( + 'test', + f'nvmeof.{nvmeof_daemon_id}', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "nvmeof.testpool.test.qwert", + "image": "", + "deploy_arguments": [], + "params": { + "tcp_ports": [5500, 4420, 8009] + }, + "meta": { + "service_name": "nvmeof.testpool", + "ports": [5500, 4420, 8009], + "ip": None, + "deployed_by": [], + "rank": None, + "rank_generation": None, + "extra_container_args": None, + "extra_entrypoint_args": None + }, + "config_blobs": { + "config": "", + "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n", + "files": { + "ceph-nvmeof.conf": nvmeof_gateway_conf + } + } + }), + ) + + +class TestMonitoring: + def _get_config(self, url: str) -> str: + + return f""" + # This file is generated by cephadm. + # See https://prometheus.io/docs/alerting/configuration/ for documentation. + + global: + resolve_timeout: 5m + http_config: + tls_config: + insecure_skip_verify: true + + route: + receiver: 'default' + routes: + - group_by: ['alertname'] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: 'ceph-dashboard' + + receivers: + - name: 'default' + webhook_configs: + - name: 'ceph-dashboard' + webhook_configs: + - url: '{url}/api/prometheus_receiver' + """ + + @pytest.mark.parametrize( + "dashboard_url,expected_yaml_url", + [ + # loopback address + ("http://[::1]:8080", "http://localhost:8080"), + # IPv6 + ( + "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080", + "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080", + ), + # IPv6 to FQDN + ( + "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080", + "http://mgr.fqdn.test:8080", + ), + # IPv4 + ( + "http://192.168.0.123:8080", + "http://192.168.0.123:8080", + ), + # IPv4 to FQDN + ( + "http://192.168.0.123:8080", + "http://mgr.fqdn.test:8080", + ), + ], + ) + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("mgr_module.MgrModule.get") + @patch("socket.getfqdn") + def test_alertmanager_config( + self, + mock_getfqdn, + mock_get, + _run_cephadm, + cephadm_module: CephadmOrchestrator, + dashboard_url, + expected_yaml_url, + ): + _run_cephadm.side_effect = async_side_effect(("{}", "", 0)) + mock_get.return_value = {"services": {"dashboard": dashboard_url}} + purl = urllib.parse.urlparse(expected_yaml_url) + mock_getfqdn.return_value = purl.hostname + + with with_host(cephadm_module, "test"): + with with_service(cephadm_module, AlertManagerSpec()): + y = dedent(self._get_config(expected_yaml_url)).lstrip() + _run_cephadm.assert_called_with( + 'test', + "alertmanager.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 
'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9093, 9094], + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [9093, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "alertmanager.yml": y, + }, + "peers": [], + } + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("socket.getfqdn") + @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') + @patch("cephadm.services.monitoring.password_hash", lambda password: 'alertmanager_password_hash') + def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + fqdn = 'host1.test' + _get_fqdn.return_value = fqdn + + def gen_cert(host, addr): + return ('mycert', 'mykey') + + def get_root_cert(): + return 'my_root_cert' + + with with_host(cephadm_module, 'test'): + cephadm_module.secure_monitoring_stack = True + cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user') + cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password') + cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert) + cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert) + with with_service(cephadm_module, AlertManagerSpec()): + + y = dedent(f""" + # This file is generated by cephadm. + # See https://prometheus.io/docs/alerting/configuration/ for documentation. + + global: + resolve_timeout: 5m + http_config: + tls_config: + ca_file: root_cert.pem + + route: + receiver: 'default' + routes: + - group_by: ['alertname'] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: 'ceph-dashboard' + + receivers: + - name: 'default' + webhook_configs: + - name: 'ceph-dashboard' + webhook_configs: + - url: 'http://{fqdn}:8080/api/prometheus_receiver' + """).lstrip() + + web_config = dedent(""" + tls_server_config: + cert_file: alertmanager.crt + key_file: alertmanager.key + basic_auth_users: + alertmanager_user: alertmanager_password_hash""").lstrip() + + _run_cephadm.assert_called_with( + 'test', + "alertmanager.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9093, 9094], + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [9093, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "alertmanager.yml": y, + 'alertmanager.crt': 'mycert', + 'alertmanager.key': 'mykey', + 'web.yml': web_config, + 'root_cert.pem': 'my_root_cert' + }, + 'peers': [], + 'web_config': '/etc/alertmanager/web.yml', + } + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') + def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast') + with with_host(cephadm_module, 'test'): + # host "test" needs to have networks for keepalive to be placed + 
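# (the IngressSpec below carries virtual_ip 1.2.3.4/32, so keepalived can only be scheduled on a host with an interface in a matching subnet) + 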
cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + }) + with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \ + with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \ + with_service(cephadm_module, s) as _, \ + with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \ + with_service(cephadm_module, IngressSpec(service_id='ingress', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ip="1.2.3.4/32", + backend_service='rgw.foo')) as _, \ + with_service(cephadm_module, PrometheusSpec('prometheus')) as _: + + y = dedent(""" + # This file is generated by cephadm. + global: + scrape_interval: 10s + evaluation_interval: 10s + rule_files: + - /etc/prometheus/alerting/* + + alerting: + alertmanagers: + - scheme: http + http_sd_configs: + - url: http://[::1]:8765/sd/prometheus/sd-config?service=alertmanager + + scrape_configs: + - job_name: 'ceph' + honor_labels: true + http_sd_configs: + - url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus + + - job_name: 'node' + http_sd_configs: + - url: http://[::1]:8765/sd/prometheus/sd-config?service=node-exporter + + - job_name: 'haproxy' + http_sd_configs: + - url: http://[::1]:8765/sd/prometheus/sd-config?service=haproxy + + - job_name: 'ceph-exporter' + honor_labels: true + http_sd_configs: + - url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter + """).lstrip() + + _run_cephadm.assert_called_with( + 'test', + "prometheus.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'prometheus.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9095], + }, + "meta": { + 'service_name': 'prometheus', + 'ports': [9095], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "prometheus.yml": y, + "/etc/prometheus/alerting/custom_alerts.yml": "", + }, + 'retention_time': '15d', + 'retention_size': '0', + }, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') + @patch("cephadm.services.monitoring.password_hash", lambda password: 'prometheus_password_hash') + def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast') + + def gen_cert(host, addr): + return ('mycert', 'mykey') + + with with_host(cephadm_module, 'test'): + cephadm_module.secure_monitoring_stack = True + cephadm_module.set_store(PrometheusService.USER_CFG_KEY, 'prometheus_user') + cephadm_module.set_store(PrometheusService.PASS_CFG_KEY, 'prometheus_plain_password') + cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user') + cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password') + cephadm_module.http_server.service_discovery.username = 'sd_user' + cephadm_module.http_server.service_discovery.password = 'sd_password' + cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock( + side_effect=gen_cert) + # host "test" needs to have networks for keepalive to be placed + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': 
['1.2.3.1'] + }, + }) + with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \ + with_service(cephadm_module, s) as _, \ + with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \ + with_service(cephadm_module, IngressSpec(service_id='ingress', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ip="1.2.3.4/32", + backend_service='rgw.foo')) as _, \ + with_service(cephadm_module, PrometheusSpec('prometheus')) as _: + + web_config = dedent(""" + tls_server_config: + cert_file: prometheus.crt + key_file: prometheus.key + basic_auth_users: + prometheus_user: prometheus_password_hash""").lstrip() + + y = dedent(""" + # This file is generated by cephadm. + global: + scrape_interval: 10s + evaluation_interval: 10s + rule_files: + - /etc/prometheus/alerting/* + + alerting: + alertmanagers: + - scheme: https + basic_auth: + username: alertmanager_user + password: alertmanager_plain_password + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager + basic_auth: + username: sd_user + password: sd_password + tls_config: + ca_file: root_cert.pem + + scrape_configs: + - job_name: 'ceph' + scheme: https + tls_config: + ca_file: mgr_prometheus_cert.pem + honor_labels: true + http_sd_configs: + - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus + basic_auth: + username: sd_user + password: sd_password + tls_config: + ca_file: root_cert.pem + + - job_name: 'node' + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter + basic_auth: + username: sd_user + password: sd_password + tls_config: + ca_file: root_cert.pem + + - job_name: 'haproxy' + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy + basic_auth: + username: sd_user + password: sd_password + tls_config: + ca_file: root_cert.pem + + - job_name: 'ceph-exporter' + honor_labels: true + scheme: https + tls_config: + ca_file: root_cert.pem + http_sd_configs: + - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter + basic_auth: + username: sd_user + password: sd_password + tls_config: + ca_file: root_cert.pem + """).lstrip() + + _run_cephadm.assert_called_with( + 'test', + "prometheus.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'prometheus.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9095], + }, + "meta": { + 'service_name': 'prometheus', + 'ports': [9095], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + 'files': { + 'prometheus.yml': y, + 'root_cert.pem': '', + 'mgr_prometheus_cert.pem': '', + 'web.yml': web_config, + 'prometheus.crt': 'mycert', + 'prometheus.key': 'mykey', + "/etc/prometheus/alerting/custom_alerts.yml": "", + }, + 'retention_time': '15d', + 'retention_size': '0', + 'web_config': '/etc/prometheus/web.yml', + }, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, MonitoringSpec('loki')) as _: + + y = dedent(""" 
+ # This file is generated by cephadm. + auth_enabled: false + + server: + http_listen_port: 3100 + grpc_listen_port: 8080 + + common: + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + + schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h""").lstrip() + + _run_cephadm.assert_called_with( + 'test', + "loki.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'loki.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [3100], + }, + "meta": { + 'service_name': 'loki', + 'ports': [3100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "loki.yml": y + }, + }, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec('mgr')) as _, \ + with_service(cephadm_module, MonitoringSpec('promtail')) as _: + + y = dedent(""" + # This file is generated by cephadm. + server: + http_listen_port: 9080 + grpc_listen_port: 0 + + positions: + filename: /tmp/positions.yaml + + clients: + - url: http://:3100/loki/api/v1/push + + scrape_configs: + - job_name: system + static_configs: + - labels: + job: Cluster Logs + __path__: /var/log/ceph/**/*.log""").lstrip() + + _run_cephadm.assert_called_with( + 'test', + "promtail.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'promtail.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9080], + }, + "meta": { + 'service_name': 'promtail', + 'ports': [9080], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "promtail.yml": y + }, + }, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4') + @patch("cephadm.services.monitoring.verify_tls", lambda *_: None) + def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(("{}", "", 0)) + + with with_host(cephadm_module, "test"): + cephadm_module.set_store("test/grafana_crt", grafana_cert) + cephadm_module.set_store("test/grafana_key", grafana_key) + with with_service( + cephadm_module, PrometheusSpec("prometheus") + ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service( + cephadm_module, GrafanaSpec("grafana") + ) as _: + files = { + 'grafana.ini': dedent(""" + # This file is generated by cephadm. + [users] + default_theme = light + [auth.anonymous] + enabled = true + org_name = 'Main Org.' 
+ org_role = 'Viewer' + [server] + domain = 'bootstrap.storage.lab' + protocol = https + cert_file = /etc/grafana/certs/cert_file + cert_key = /etc/grafana/certs/cert_key + http_port = 3000 + http_addr = + [snapshots] + external_enabled = false + [security] + disable_initial_admin_creation = true + cookie_secure = true + cookie_samesite = none + allow_embedding = true""").lstrip(), # noqa: W291 + 'provisioning/datasources/ceph-dashboard.yml': dedent(""" + # This file is generated by cephadm. + apiVersion: 1 + + deleteDatasources: + - name: 'Dashboard1' + orgId: 1 + + datasources: + - name: 'Dashboard1' + type: 'prometheus' + access: 'proxy' + orgId: 1 + url: 'http://[1::4]:9095' + basicAuth: false + isDefault: true + editable: false + + - name: 'Loki' + type: 'loki' + access: 'proxy' + url: '' + basicAuth: false + isDefault: false + editable: false""").lstrip(), + 'certs/cert_file': dedent(f""" + # generated by cephadm\n{grafana_cert}""").lstrip(), + 'certs/cert_key': dedent(f""" + # generated by cephadm\n{grafana_key}""").lstrip(), + } + + _run_cephadm.assert_called_with( + 'test', + "grafana.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'grafana.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [3000], + }, + "meta": { + 'service_name': 'grafana', + 'ports': [3000], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": files, + }, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec('mgr')) as _, \ + with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')): + out = cephadm_module.cephadm_services['grafana'].generate_config( + CephadmDaemonDeploySpec('test', 'daemon', 'grafana')) + assert out == ( + { + 'files': + { + 'grafana.ini': + '# This file is generated by cephadm.\n' + '[users]\n' + ' default_theme = light\n' + '[auth.anonymous]\n' + ' enabled = true\n' + " org_name = 'Main Org.'\n" + " org_role = 'Viewer'\n" + '[server]\n' + " domain = 'bootstrap.storage.lab'\n" + ' protocol = https\n' + ' cert_file = /etc/grafana/certs/cert_file\n' + ' cert_key = /etc/grafana/certs/cert_key\n' + ' http_port = 3000\n' + ' http_addr = \n' + '[snapshots]\n' + ' external_enabled = false\n' + '[security]\n' + ' admin_user = admin\n' + ' admin_password = secure\n' + ' cookie_secure = true\n' + ' cookie_samesite = none\n' + ' allow_embedding = true', + 'provisioning/datasources/ceph-dashboard.yml': + "# This file is generated by cephadm.\n" + "apiVersion: 1\n\n" + 'deleteDatasources:\n\n' + 'datasources:\n\n' + " - name: 'Loki'\n" + " type: 'loki'\n" + " access: 'proxy'\n" + " url: ''\n" + ' basicAuth: false\n' + ' isDefault: false\n' + ' editable: false', + 'certs/cert_file': ANY, + 'certs/cert_key': ANY}}, ['secure_monitoring_stack:False']) + + @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_grafana_no_anon_access(self, cephadm_module: CephadmOrchestrator): + # with anonymous_access set to False, expecting the [auth.anonymous] section + # to not be present in the grafana config. 
Note that we require an initial_admin_password + # to be provided when anonymous_access is False + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec('mgr')) as _, \ + with_service(cephadm_module, GrafanaSpec(anonymous_access=False, initial_admin_password='secure')): + out = cephadm_module.cephadm_services['grafana'].generate_config( + CephadmDaemonDeploySpec('test', 'daemon', 'grafana')) + assert out == ( + { + 'files': + { + 'grafana.ini': + '# This file is generated by cephadm.\n' + '[users]\n' + ' default_theme = light\n' + '[server]\n' + " domain = 'bootstrap.storage.lab'\n" + ' protocol = https\n' + ' cert_file = /etc/grafana/certs/cert_file\n' + ' cert_key = /etc/grafana/certs/cert_key\n' + ' http_port = 3000\n' + ' http_addr = \n' + '[snapshots]\n' + ' external_enabled = false\n' + '[security]\n' + ' admin_user = admin\n' + ' admin_password = secure\n' + ' cookie_secure = true\n' + ' cookie_samesite = none\n' + ' allow_embedding = true', + 'provisioning/datasources/ceph-dashboard.yml': + "# This file is generated by cephadm.\n" + "apiVersion: 1\n\n" + 'deleteDatasources:\n\n' + 'datasources:\n\n' + " - name: 'Loki'\n" + " type: 'loki'\n" + " access: 'proxy'\n" + " url: ''\n" + ' basicAuth: false\n' + ' isDefault: false\n' + ' editable: false', + 'certs/cert_file': ANY, + 'certs/cert_key': ANY}}, ['secure_monitoring_stack:False']) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + + yaml_str = """service_type: alertmanager +service_name: alertmanager +placement: + count: 1 +spec: + port: 4200 +""" + yaml_file = yaml.safe_load(yaml_str) + spec = ServiceSpec.from_json(yaml_file) + + with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])): + with with_service(cephadm_module, spec): + + CephadmServe(cephadm_module)._check_daemons() + + _run_cephadm.assert_called_with( + 'test', + "alertmanager.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [4200, 9094], + 'reconfig': True, + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [4200, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": {}, + }), + ) + + +class TestRGWService: + + @pytest.mark.parametrize( + "frontend, ssl, extra_args, expected", + [ + ('beast', False, ['tcp_nodelay=1'], + 'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'), + ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'], + 'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'), + ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'), + ('civetweb', True, None, + 'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'), + ] + ) + @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) + def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + cephadm_module.cache.update_host_networks('host1', { + 'fd00:fd00:fd00:3000::/64': { + 'if0': ['fd00:fd00:fd00:3000::1'] + } + }) + s = RGWSpec(service_id="foo", + 
networks=['fd00:fd00:fd00:3000::/64'], + ssl=ssl, + rgw_frontend_type=frontend, + rgw_frontend_extra_args=extra_args) + with with_service(cephadm_module, s) as dds: + _, f, _ = cephadm_module.check_mon_command({ + 'prefix': 'config get', + 'who': f'client.{dds[0]}', + 'key': 'rgw_frontends', + }) + assert f == expected + + +class TestMonService: + + def test_set_crush_locations(self, cephadm_module: CephadmOrchestrator): + mgr = FakeMgr() + mon_service = MonService(mgr) + mon_spec = ServiceSpec(service_type='mon', crush_locations={'vm-00': ['datacenter=a', 'rack=1'], 'vm-01': ['datacenter=a'], 'vm-02': ['datacenter=b', 'rack=3']}) + + mon_daemons = [ + DaemonDescription(daemon_type='mon', daemon_id='vm-00', hostname='vm-00'), + DaemonDescription(daemon_type='mon', daemon_id='vm-01', hostname='vm-01'), + DaemonDescription(daemon_type='mon', daemon_id='vm-02', hostname='vm-02') + ] + mon_service.set_crush_locations(mon_daemons, mon_spec) + assert 'vm-00' in mgr.set_mon_crush_locations + assert mgr.set_mon_crush_locations['vm-00'] == ['datacenter=a', 'rack=1'] + assert 'vm-01' in mgr.set_mon_crush_locations + assert mgr.set_mon_crush_locations['vm-01'] == ['datacenter=a'] + assert 'vm-02' in mgr.set_mon_crush_locations + assert mgr.set_mon_crush_locations['vm-02'] == ['datacenter=b', 'rack=3'] + + +class TestSNMPGateway: + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + spec = SNMPGatewaySpec( + snmp_version='V2c', + snmp_destination='192.168.1.1:162', + credentials={ + 'snmp_community': 'public' + }) + + config = { + "destination": spec.snmp_destination, + "snmp_version": spec.snmp_version, + "snmp_community": spec.credentials.get('snmp_community') + } + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.assert_called_with( + 'test', + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + spec = SNMPGatewaySpec( + snmp_version='V2c', + snmp_destination='192.168.1.1:162', + credentials={ + 'snmp_community': 'public' + }, + port=9465) + + config = { + "destination": spec.snmp_destination, + "snmp_version": spec.snmp_version, + "snmp_community": spec.credentials.get('snmp_community') + } + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.assert_called_with( + 'test', + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9465], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9465], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), + ) + + 
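# The next two tests cover SNMPv3: auth-only first, then auth plus AES privacy. + 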
@patch("cephadm.serve.CephadmServe._run_cephadm") + def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + spec = SNMPGatewaySpec( + snmp_version='V3', + snmp_destination='192.168.1.1:162', + engine_id='8000C53F00000000', + credentials={ + 'snmp_v3_auth_username': 'myuser', + 'snmp_v3_auth_password': 'mypassword' + }) + + config = { + 'destination': spec.snmp_destination, + 'snmp_version': spec.snmp_version, + 'snmp_v3_auth_protocol': 'SHA', + 'snmp_v3_auth_username': 'myuser', + 'snmp_v3_auth_password': 'mypassword', + 'snmp_v3_engine_id': '8000C53F00000000' + } + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.assert_called_with( + 'test', + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + spec = SNMPGatewaySpec( + snmp_version='V3', + snmp_destination='192.168.1.1:162', + engine_id='8000C53F00000000', + auth_protocol='MD5', + privacy_protocol='AES', + credentials={ + 'snmp_v3_auth_username': 'myuser', + 'snmp_v3_auth_password': 'mypassword', + 'snmp_v3_priv_password': 'mysecret', + }) + + config = { + 'destination': spec.snmp_destination, + 'snmp_version': spec.snmp_version, + 'snmp_v3_auth_protocol': 'MD5', + 'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'), + 'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'), + 'snmp_v3_engine_id': '8000C53F00000000', + 'snmp_v3_priv_protocol': spec.privacy_protocol, + 'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'), + } + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.assert_called_with( + 'test', + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), + ) + + +class TestIngressService: + + @pytest.mark.parametrize( + "enable_haproxy_protocol", + [False, True], + ) + @patch("cephadm.inventory.Inventory.get_addr") + @patch("cephadm.utils.resolve_ip") + @patch("cephadm.inventory.HostCache.get_daemons_by_service") + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_nfs_multiple_nfs_same_rank( + self, + _run_cephadm, + _get_daemons_by_service, + _resolve_ip, _get_addr, + cephadm_module: CephadmOrchestrator, + enable_haproxy_protocol: bool, + ): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + def fake_resolve_ip(hostname: str) -> str: + if hostname == 'host1': + return '192.168.122.111' + elif hostname == 'host2': + return '192.168.122.222' + else: + 
return 'xxx.xxx.xxx.xxx'
+        _resolve_ip.side_effect = fake_resolve_ip
+
+        def fake_get_addr(hostname: str) -> str:
+            return hostname
+        _get_addr.side_effect = fake_get_addr
+
+        nfs_service = NFSServiceSpec(
+            service_id="foo",
+            placement=PlacementSpec(
+                count=1,
+                hosts=['host1', 'host2']),
+            port=12049,
+            enable_haproxy_protocol=enable_haproxy_protocol,
+        )
+
+        ispec = IngressSpec(
+            service_type='ingress',
+            service_id='nfs.foo',
+            backend_service='nfs.foo',
+            frontend_port=2049,
+            monitor_port=9049,
+            virtual_ip='192.168.122.100/24',
+            monitor_user='admin',
+            monitor_password='12345',
+            keepalived_password='12345',
+            enable_haproxy_protocol=enable_haproxy_protocol,
+        )
+
+        cephadm_module.spec_store._specs = {
+            'nfs.foo': nfs_service,
+            'ingress.nfs.foo': ispec
+        }
+        cephadm_module.spec_store.spec_created = {
+            'nfs.foo': datetime_now(),
+            'ingress.nfs.foo': datetime_now()
+        }
+
+        # in both orderings tested here only the IP of the host1 nfs daemon
+        # should be used, since that daemon is given a higher rank_generation
+        # than the host2 daemon while sharing the same rank
+        haproxy_txt = (
+            '# This file is generated by cephadm.\n'
+            'global\n'
+            ' log 127.0.0.1 local2\n'
+            ' chroot /var/lib/haproxy\n'
+            ' pidfile /var/lib/haproxy/haproxy.pid\n'
+            ' maxconn 8000\n'
+            ' daemon\n'
+            ' stats socket /var/lib/haproxy/stats\n\n'
+            'defaults\n'
+            ' mode tcp\n'
+            ' log global\n'
+            ' timeout queue 1m\n'
+            ' timeout connect 10s\n'
+            ' timeout client 1m\n'
+            ' timeout server 1m\n'
+            ' timeout check 10s\n'
+            ' maxconn 8000\n\n'
+            'frontend stats\n'
+            ' mode http\n'
+            ' bind 192.168.122.100:9049\n'
+            ' bind host1:9049\n'
+            ' stats enable\n'
+            ' stats uri /stats\n'
+            ' stats refresh 10s\n'
+            ' stats auth admin:12345\n'
+            ' http-request use-service prometheus-exporter if { path /metrics }\n'
+            ' monitor-uri /health\n\n'
+            'frontend frontend\n'
+            ' bind 192.168.122.100:2049\n'
+            ' default_backend backend\n\n'
+            'backend backend\n'
+            ' mode tcp\n'
+            ' balance source\n'
+            ' hash-type consistent\n'
+        )
+        if enable_haproxy_protocol:
+            haproxy_txt += ' default-server send-proxy-v2\n'
+        haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049\n'
+        haproxy_expected_conf = {
+            'files': {'haproxy.cfg': haproxy_txt}
+        }
+
+        # verify we get the same cfg regardless of the order in which the nfs
+        # daemons are returned; both are rank 0, so only the one with
+        # rank_generation 1 (i.e. the one on host1) should be used
+        nfs_daemons = [
+            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
+            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
+        ]
+        _get_daemons_by_service.return_value = nfs_daemons
+
+        haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
+            CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
+
+        assert haproxy_generated_conf[0] == haproxy_expected_conf
+
+        # swapping the order now; it should still pick the daemon with the
+        # higher rank_generation, i.e. the one on host1
+        nfs_daemons = [
+            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049]),
+            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049])
+        ]
+
_get_daemons_by_service.return_value = nfs_daemons + + haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config( + CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name())) + + assert haproxy_generated_conf[0] == haproxy_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.7'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4'] + } + }) + + # the ingress backend + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_interface_networks=['1.2.3.0/24'], + virtual_ip="1.2.3.4/32") + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the keepalived conf based on the specified spec + keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.4\n ' + 'unicast_peer {\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.4/32 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + # generate the haproxy conf based on the specified spec + haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + haproxy_expected_conf = { + 'files': + { + 'haproxy.cfg': + '# This file is generated by cephadm.' 
+ '\nglobal\n log ' + '127.0.0.1 local2\n ' + 'chroot /var/lib/haproxy\n ' + 'pidfile /var/lib/haproxy/haproxy.pid\n ' + 'maxconn 8000\n ' + 'daemon\n ' + 'stats socket /var/lib/haproxy/stats\n' + '\ndefaults\n ' + 'mode http\n ' + 'log global\n ' + 'option httplog\n ' + 'option dontlognull\n ' + 'option http-server-close\n ' + 'option forwardfor except 127.0.0.0/8\n ' + 'option redispatch\n ' + 'retries 3\n ' + 'timeout queue 20s\n ' + 'timeout connect 5s\n ' + 'timeout http-request 1s\n ' + 'timeout http-keep-alive 5s\n ' + 'timeout client 30s\n ' + 'timeout server 30s\n ' + 'timeout check 5s\n ' + 'maxconn 8000\n' + '\nfrontend stats\n ' + 'mode http\n ' + 'bind 1.2.3.4:8999\n ' + 'bind 1.2.3.7:8999\n ' + 'stats enable\n ' + 'stats uri /stats\n ' + 'stats refresh 10s\n ' + 'stats auth admin:12345\n ' + 'http-request use-service prometheus-exporter if { path /metrics }\n ' + 'monitor-uri /health\n' + '\nfrontend frontend\n ' + 'bind 1.2.3.4:8089\n ' + 'default_backend backend\n\n' + 'backend backend\n ' + 'option forwardfor\n ' + 'balance static-rr\n ' + 'option httpchk HEAD / HTTP/1.0\n ' + 'server ' + + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n' + } + } + + assert haproxy_generated_conf[0] == haproxy_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_ssl_rgw(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + } + }) + + # the ingress backend + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast', rgw_frontend_port=443, ssl=True) + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_interface_networks=['1.2.3.0/24'], + virtual_ip="1.2.3.4/32") + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the keepalived conf based on the specified spec + keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://[1::4]:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.1\n ' + 'unicast_peer {\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.4/32 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + # generate the haproxy conf based on the specified spec + haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + haproxy_expected_conf = { + 'files': + { + 'haproxy.cfg': + '# This file is generated by cephadm.' 
+ '\nglobal\n log ' + '127.0.0.1 local2\n ' + 'chroot /var/lib/haproxy\n ' + 'pidfile /var/lib/haproxy/haproxy.pid\n ' + 'maxconn 8000\n ' + 'daemon\n ' + 'stats socket /var/lib/haproxy/stats\n' + '\ndefaults\n ' + 'mode http\n ' + 'log global\n ' + 'option httplog\n ' + 'option dontlognull\n ' + 'option http-server-close\n ' + 'option forwardfor except 127.0.0.0/8\n ' + 'option redispatch\n ' + 'retries 3\n ' + 'timeout queue 20s\n ' + 'timeout connect 5s\n ' + 'timeout http-request 1s\n ' + 'timeout http-keep-alive 5s\n ' + 'timeout client 30s\n ' + 'timeout server 30s\n ' + 'timeout check 5s\n ' + 'maxconn 8000\n' + '\nfrontend stats\n ' + 'mode http\n ' + 'bind 1.2.3.4:8999\n ' + 'bind 1::4:8999\n ' + 'stats enable\n ' + 'stats uri /stats\n ' + 'stats refresh 10s\n ' + 'stats auth admin:12345\n ' + 'http-request use-service prometheus-exporter if { path /metrics }\n ' + 'monitor-uri /health\n' + '\nfrontend frontend\n ' + 'bind 1.2.3.4:8089\n ' + 'default_backend backend\n\n' + 'backend backend\n ' + 'option forwardfor\n ' + 'default-server ssl\n ' + 'default-server verify none\n ' + 'balance static-rr\n ' + 'option httpchk HEAD / HTTP/1.0\n ' + 'server ' + + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100\n' + } + } + + assert haproxy_generated_conf[0] == haproxy_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.7'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_interface_networks=['1.2.3.0/24'], + virtual_ips_list=["1.2.3.4/32"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the keepalived conf based on the specified spec + # Test with only 1 IP on the list, as it will fail with more VIPS but only one host. 
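+ # (keepalived config generation fails when there are more VIPs than hosts, hence the single-entry virtual_ips_list)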
+ keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.1\n ' + 'unicast_peer {\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.4/32 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + # generate the haproxy conf based on the specified spec + haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + haproxy_expected_conf = { + 'files': + { + 'haproxy.cfg': + '# This file is generated by cephadm.' + '\nglobal\n log ' + '127.0.0.1 local2\n ' + 'chroot /var/lib/haproxy\n ' + 'pidfile /var/lib/haproxy/haproxy.pid\n ' + 'maxconn 8000\n ' + 'daemon\n ' + 'stats socket /var/lib/haproxy/stats\n' + '\ndefaults\n ' + 'mode http\n ' + 'log global\n ' + 'option httplog\n ' + 'option dontlognull\n ' + 'option http-server-close\n ' + 'option forwardfor except 127.0.0.0/8\n ' + 'option redispatch\n ' + 'retries 3\n ' + 'timeout queue 20s\n ' + 'timeout connect 5s\n ' + 'timeout http-request 1s\n ' + 'timeout http-keep-alive 5s\n ' + 'timeout client 30s\n ' + 'timeout server 30s\n ' + 'timeout check 5s\n ' + 'maxconn 8000\n' + '\nfrontend stats\n ' + 'mode http\n ' + 'bind *:8999\n ' + 'bind 1.2.3.7:8999\n ' + 'stats enable\n ' + 'stats uri /stats\n ' + 'stats refresh 10s\n ' + 'stats auth admin:12345\n ' + 'http-request use-service prometheus-exporter if { path /metrics }\n ' + 'monitor-uri /health\n' + '\nfrontend frontend\n ' + 'bind *:8089\n ' + 'default_backend backend\n\n' + 'backend backend\n ' + 'option forwardfor\n ' + 'balance static-rr\n ' + 'option httpchk HEAD / HTTP/1.0\n ' + 'server ' + + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n' + } + } + + assert haproxy_generated_conf[0] == haproxy_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_haproxy_port_ips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.7'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4/32'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ip = '1.2.3.100' + frontend_port = 8089 + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=frontend_port, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ip=f"{ip}/24") + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the haproxy conf based on the specified spec + haproxy_daemon_spec = 
cephadm_module.cephadm_services['ingress'].prepare_create( + CephadmDaemonDeploySpec( + host='test', + daemon_type='haproxy', + daemon_id='ingress', + service_name=ispec.service_name())) + + assert haproxy_daemon_spec.port_ips == {str(frontend_port): ip} + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_keepalive_config_multi_interface_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.1'): + with with_host(cephadm_module, 'test2', addr='1.2.3.2'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.1'] + } + }) + cephadm_module.cache.update_host_networks('test2', { + '1.2.3.0/24': { + 'if0': ['1.2.3.2'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.2'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + placement=PlacementSpec(hosts=['test', 'test2']), + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://1.2.3.1:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.1\n ' + 'unicast_peer {\n ' + '1.2.3.2\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.100/24 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + 'vrrp_instance VI_1 {\n ' + 'state BACKUP\n ' + 'priority 90\n ' + 'interface if1\n ' + 'virtual_router_id 51\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 100.100.100.1\n ' + 'unicast_peer {\n ' + '100.100.100.2\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '100.100.100.100/24 dev if1\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_keepalive_interface_host_filtering(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + # we need to make sure keepalive daemons will have an interface + # on the hosts we deploy them on in order to set up their VIP. 
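+ # only hosts with an interface in every VIP subnet should end up with a keepalived daemon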
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.1'): + with with_host(cephadm_module, 'test2', addr='1.2.3.2'): + with with_host(cephadm_module, 'test3', addr='1.2.3.3'): + with with_host(cephadm_module, 'test4', addr='1.2.3.3'): + # setup "test" and "test4" to have all the necessary interfaces, + # "test2" to have one of them (should still be filtered) + # and "test3" to have none of them + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.1'] + } + }) + cephadm_module.cache.update_host_networks('test2', { + '1.2.3.0/24': { + 'if0': ['1.2.3.2'] + }, + }) + cephadm_module.cache.update_host_networks('test4', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.4'] + } + }) + + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + placement=PlacementSpec(hosts=['test', 'test2', 'test3', 'test4']), + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # since we're never actually going to refresh the host here, + # check the tmp daemons to see what was placed during the apply + daemons = cephadm_module.cache._get_tmp_daemons() + keepalive_daemons = [d for d in daemons if d.daemon_type == 'keepalived'] + hosts_deployed_on = [d.hostname for d in keepalive_daemons] + assert 'test' in hosts_deployed_on + assert 'test2' not in hosts_deployed_on + assert 'test3' not in hosts_deployed_on + assert 'test4' in hosts_deployed_on + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock()) + @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock()) + @patch("cephadm.services.nfs.NFSService.purge", MagicMock()) + @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock()) + def test_keepalive_only_nfs_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.7'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + } + }) + + # Check the ingress with multiple VIPs + s = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1), + virtual_ip='1.2.3.0/24') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='nfs.foo', + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ip='1.2.3.0/24', + keepalive_only=True) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + nfs_generated_conf, _ = cephadm_module.cephadm_services['nfs'].generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=s.service_name())) + ganesha_conf = nfs_generated_conf['files']['ganesha.conf'] + assert "Bind_addr = 1.2.3.0/24" in ganesha_conf + + keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + 
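# with keepalive_only=True there is no haproxy to health-check, so the expected check script is simply /usr/bin/false + 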
keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/false"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.1\n ' + 'unicast_peer {\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.0/24 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock()) + @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock()) + @patch("cephadm.services.nfs.NFSService.purge", MagicMock()) + @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock()) + @patch("cephadm.inventory.Inventory.keys") + @patch("cephadm.inventory.Inventory.get_addr") + @patch("cephadm.utils.resolve_ip") + @patch("cephadm.inventory.HostCache.get_daemons_by_service") + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_nfs_proxy_protocol( + self, + _run_cephadm, + _get_daemons_by_service, + _resolve_ip, + _get_addr, + _inventory_keys, + cephadm_module: CephadmOrchestrator, + ): + """Verify that setting enable_haproxy_protocol for both ingress and + nfs services sets the desired configuration parameters in both + the haproxy config and nfs ganesha config. + """ + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + def fake_resolve_ip(hostname: str) -> str: + if hostname in ('host1', "192.168.122.111"): + return '192.168.122.111' + elif hostname in ('host2', '192.168.122.222'): + return '192.168.122.222' + else: + raise KeyError(hostname) + _resolve_ip.side_effect = fake_resolve_ip + _get_addr.side_effect = fake_resolve_ip + + def fake_keys(): + return ['host1', 'host2'] + _inventory_keys.side_effect = fake_keys + + nfs_service = NFSServiceSpec( + service_id="foo", + placement=PlacementSpec( + count=1, + hosts=['host1', 'host2']), + port=12049, + enable_haproxy_protocol=True, + ) + + ispec = IngressSpec( + service_type='ingress', + service_id='nfs.foo', + backend_service='nfs.foo', + frontend_port=2049, + monitor_port=9049, + virtual_ip='192.168.122.100/24', + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + enable_haproxy_protocol=True, + ) + + cephadm_module.spec_store._specs = { + 'nfs.foo': nfs_service, + 'ingress.nfs.foo': ispec + } + cephadm_module.spec_store.spec_created = { + 'nfs.foo': datetime_now(), + 'ingress.nfs.foo': datetime_now() + } + + haproxy_txt = ( + '# This file is generated by cephadm.\n' + 'global\n' + ' log 127.0.0.1 local2\n' + ' chroot /var/lib/haproxy\n' + ' pidfile /var/lib/haproxy/haproxy.pid\n' + ' maxconn 8000\n' + ' daemon\n' + ' stats socket /var/lib/haproxy/stats\n\n' + 'defaults\n' + ' mode tcp\n' + ' log global\n' + ' timeout queue 1m\n' + ' timeout connect 10s\n' + ' timeout client 1m\n' + ' timeout server 1m\n' + ' timeout check 10s\n' + ' maxconn 8000\n\n' + 'frontend stats\n' + ' mode http\n' + ' bind 192.168.122.100:9049\n' + ' bind 192.168.122.111:9049\n' + ' stats enable\n' + ' stats uri /stats\n' + ' stats refresh 10s\n' + ' stats auth admin:12345\n' + ' http-request use-service prometheus-exporter if { path /metrics }\n' + ' monitor-uri 
/health\n\n' + 'frontend frontend\n' + ' bind 192.168.122.100:2049\n' + ' default_backend backend\n\n' + 'backend backend\n' + ' mode tcp\n' + ' balance source\n' + ' hash-type consistent\n' + ' default-server send-proxy-v2\n' + ' server nfs.foo.0 192.168.122.111:12049\n' + ) + haproxy_expected_conf = { + 'files': {'haproxy.cfg': haproxy_txt} + } + + nfs_ganesha_txt = ( + "# This file is generated by cephadm.\n" + 'NFS_CORE_PARAM {\n' + ' Enable_NLM = false;\n' + ' Enable_RQUOTA = false;\n' + ' Protocols = 4;\n' + ' NFS_Port = 2049;\n' + ' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n' + '}\n' + '\n' + 'NFSv4 {\n' + ' Delegations = false;\n' + " RecoveryBackend = 'rados_cluster';\n" + ' Minor_Versions = 1, 2;\n' + '}\n' + '\n' + 'RADOS_KV {\n' + ' UserId = "nfs.foo.test.0.0";\n' + ' nodeid = "nfs.foo.None";\n' + ' pool = ".nfs";\n' + ' namespace = "foo";\n' + '}\n' + '\n' + 'RADOS_URLS {\n' + ' UserId = "nfs.foo.test.0.0";\n' + ' watch_url = ' + '"rados://.nfs/foo/conf-nfs.foo";\n' + '}\n' + '\n' + 'RGW {\n' + ' cluster = "ceph";\n' + ' name = "client.nfs.foo.test.0.0-rgw";\n' + '}\n' + '\n' + "%url rados://.nfs/foo/conf-nfs.foo" + ) + nfs_expected_conf = { + 'files': {'ganesha.conf': nfs_ganesha_txt}, + 'config': '', + 'extra_args': ['-N', 'NIV_EVENT'], + 'keyring': ( + '[client.nfs.foo.test.0.0]\n' + 'key = None\n' + ), + 'namespace': 'foo', + 'pool': '.nfs', + 'rgw': { + 'cluster': 'ceph', + 'keyring': ( + '[client.nfs.foo.test.0.0-rgw]\n' + 'key = None\n' + ), + 'user': 'nfs.foo.test.0.0-rgw', + }, + 'userid': 'nfs.foo.test.0.0', + } + + nfs_daemons = [ + DaemonDescription( + daemon_type='nfs', + daemon_id='foo.0.1.host1.qwerty', + hostname='host1', + rank=0, + rank_generation=1, + ports=[12049], + ), + DaemonDescription( + daemon_type='nfs', + daemon_id='foo.0.0.host2.abcdef', + hostname='host2', + rank=0, + rank_generation=0, + ports=[12049], + ), + ] + _get_daemons_by_service.return_value = nfs_daemons + + ingress_svc = cephadm_module.cephadm_services['ingress'] + nfs_svc = cephadm_module.cephadm_services['nfs'] + + # add host network info to one host to test the behavior of + # adding all known-good addresses of the host to the list. 
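+        # (the extra 10.10.2.0/24 address should surface in the HAProxy_Hosts
+        # line of the expected ganesha.conf above, while the link-local
+        # fe80::/64 addresses should be left out entirely)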
+ cephadm_module.cache.update_host_networks('host1', { + # this one is additional + '10.10.2.0/24': { + 'eth1': ['10.10.2.20'] + }, + # this is redundant and will be skipped + '192.168.122.0/24': { + 'eth0': ['192.168.122.111'] + }, + # this is a link-local address and will be ignored + "fe80::/64": { + "veth0": [ + "fe80::8cf5:25ff:fe1c:d963" + ], + "eth0": [ + "fe80::c7b:cbff:fef6:7370" + ], + "eth1": [ + "fe80::7201:25a7:390b:d9a7" + ] + }, + }) + + haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config( + CephadmDaemonDeploySpec( + host='host1', + daemon_id='ingress', + service_name=ispec.service_name(), + ), + ) + assert haproxy_generated_conf == haproxy_expected_conf + + nfs_generated_conf, _ = nfs_svc.generate_config( + CephadmDaemonDeploySpec( + host='test', + daemon_id='foo.test.0.0', + service_name=nfs_service.service_name(), + ), + ) + assert nfs_generated_conf == nfs_expected_conf + + +class TestCephFsMirror: + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec('cephfs-mirror')): + cephadm_module.assert_issued_mon_command({ + 'prefix': 'mgr module enable', + 'module': 'mirroring' + }) + + +class TestJaeger: + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + spec = TracingSpec(es_nodes="192.168.0.1:9200", + service_type="jaeger-query") + + config = {"elasticsearch_nodes": "http://192.168.0.1:9200"} + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, spec): + _run_cephadm.assert_called_with( + 'test', + "jaeger-query.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-query.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [16686], + }, + "meta": { + 'service_name': 'jaeger-query', + 'ports': [16686], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + collector_spec = TracingSpec(service_type="jaeger-collector") + es_spec = TracingSpec(service_type="elasticsearch") + es_config = {} + + with with_host(cephadm_module, 'test'): + collector_config = { + "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'} + with with_service(cephadm_module, es_spec): + _run_cephadm.assert_called_with( + "test", + "elasticsearch.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'elasticsearch.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9200], + }, + "meta": { + 'service_name': 'elasticsearch', + 'ports': [9200], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": es_config, + }), + ) + with with_service(cephadm_module, collector_spec): + _run_cephadm.assert_called_with( + "test", + "jaeger-collector.test", + ['_orch', 'deploy'], + [], + 
stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-collector.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [14250], + }, + "meta": { + 'service_name': 'jaeger-collector', + 'ports': [14250], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": collector_config, + }), + ) + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + collector_spec = TracingSpec(service_type="jaeger-collector", es_nodes="192.168.0.1:9200") + collector_config = {"elasticsearch_nodes": "http://192.168.0.1:9200"} + + agent_spec = TracingSpec(service_type="jaeger-agent") + agent_config = {"collector_nodes": "test:14250"} + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, collector_spec): + _run_cephadm.assert_called_with( + "test", + "jaeger-collector.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-collector.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [14250], + }, + "meta": { + 'service_name': 'jaeger-collector', + 'ports': [14250], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": collector_config, + }), + ) + with with_service(cephadm_module, agent_spec): + _run_cephadm.assert_called_with( + "test", + "jaeger-agent.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-agent.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [6799], + }, + "meta": { + 'service_name': 'jaeger-agent', + 'ports': [6799], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": agent_config, + }), + ) diff --git a/src/pybind/mgr/cephadm/tests/test_spec.py b/src/pybind/mgr/cephadm/tests/test_spec.py new file mode 100644 index 000000000..78a2d7311 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_spec.py @@ -0,0 +1,590 @@ +# Disable autopep8 for this file: + +# fmt: off + +import json + +import pytest + +from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, \ + IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec +from orchestrator import DaemonDescription, OrchestratorError + + +@pytest.mark.parametrize( + "spec_json", + json.loads("""[ +{ + "placement": { + "count": 1 + }, + "service_type": "alertmanager" +}, +{ + "placement": { + "host_pattern": "*" + }, + "service_type": "crash" +}, +{ + "placement": { + "count": 1 + }, + "service_type": "grafana", + "protocol": "https" +}, +{ + "placement": { + "count": 2 + }, + "service_type": "mgr" +}, +{ + "placement": { + "count": 5 + }, + "service_type": "mon" +}, +{ + "placement": { + "host_pattern": "*" + }, + "service_type": "node-exporter" +}, +{ + "placement": { + "count": 1 + }, + "service_type": "prometheus" +}, +{ + "placement": { + "hosts": [ + { + "hostname": "ceph-001", + "network": "", + "name": "" + } + ] + }, + "service_type": "rgw", + "service_id": "default-rgw-realm.eu-central-1.1", + "rgw_realm": "default-rgw-realm", + "rgw_zone": "eu-central-1" +}, +{ + "service_type": "osd", + "service_id": "osd_spec_default", + "placement": { + "host_pattern": "*" + }, + 
"data_devices": { + "model": "MC-55-44-XZ" + }, + "db_devices": { + "model": "SSD-123-foo" + }, + "wal_devices": { + "model": "NVME-QQQQ-987" + } +} +] +""") +) +def test_spec_octopus(spec_json): + # https://tracker.ceph.com/issues/44934 + # Those are real user data from early octopus. + # Please do not modify those JSON values. + + spec = ServiceSpec.from_json(spec_json) + + # just some verification that we can sill read old octopus specs + def convert_to_old_style_json(j): + j_c = dict(j.copy()) + j_c.pop('service_name', None) + if 'spec' in j_c: + spec = j_c.pop('spec') + j_c.update(spec) + if 'placement' in j_c: + if 'hosts' in j_c['placement']: + j_c['placement']['hosts'] = [ + { + 'hostname': HostPlacementSpec.parse(h).hostname, + 'network': HostPlacementSpec.parse(h).network, + 'name': HostPlacementSpec.parse(h).name + } + for h in j_c['placement']['hosts'] + ] + j_c.pop('objectstore', None) + j_c.pop('filter_logic', None) + j_c.pop('anonymous_access', None) + return j_c + + assert spec_json == convert_to_old_style_json(spec.to_json()) + + +@pytest.mark.parametrize( + "dd_json", + json.loads("""[ + { + "hostname": "ceph-001", + "container_id": "d94d7969094d", + "container_image_id": "0881eb8f169f5556a292b4e2c01d683172b12830a62a9225a98a8e206bb734f0", + "container_image_name": "docker.io/prom/alertmanager:latest", + "daemon_id": "ceph-001", + "daemon_type": "alertmanager", + "version": "0.20.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725856", + "created": "2020-04-02T19:23:08.829543", + "started": "2020-04-03T07:29:16.932838", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "c4b036202241", + "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1", + "container_image_name": "docker.io/ceph/ceph:v15", + "daemon_id": "ceph-001", + "daemon_type": "crash", + "version": "15.2.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725903", + "created": "2020-04-02T19:23:11.390694", + "started": "2020-04-03T07:29:16.910897", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "5b7b94b48f31", + "container_image_id": "87a51ecf0b1c9a7b187b21c1b071425dafea0d765a96d5bc371c791169b3d7f4", + "container_image_name": "docker.io/ceph/ceph-grafana:latest", + "daemon_id": "ceph-001", + "daemon_type": "grafana", + "version": "6.6.2", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725950", + "created": "2020-04-02T19:23:52.025088", + "started": "2020-04-03T07:29:16.847972", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "9ca007280456", + "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1", + "container_image_name": "docker.io/ceph/ceph:v15", + "daemon_id": "ceph-001.gkjwqp", + "daemon_type": "mgr", + "version": "15.2.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725807", + "created": "2020-04-02T19:22:18.648584", + "started": "2020-04-03T07:29:16.856153", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "3d1ba9a2b697", + "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1", + "container_image_name": "docker.io/ceph/ceph:v15", + "daemon_id": "ceph-001", + "daemon_type": "mon", + "version": "15.2.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725715", + "created": "2020-04-02T19:22:13.863300", + "started": 
"2020-04-03T07:29:17.206024", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "36d026c68ba1", + "container_image_id": "e5a616e4b9cf68dfcad7782b78e118be4310022e874d52da85c55923fb615f87", + "container_image_name": "docker.io/prom/node-exporter:latest", + "daemon_id": "ceph-001", + "daemon_type": "node-exporter", + "version": "0.18.1", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.725996", + "created": "2020-04-02T19:23:53.880197", + "started": "2020-04-03T07:29:16.880044", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "faf76193cbfe", + "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1", + "container_image_name": "docker.io/ceph/ceph:v15", + "daemon_id": "0", + "daemon_type": "osd", + "version": "15.2.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.726088", + "created": "2020-04-02T20:35:02.991435", + "started": "2020-04-03T07:29:19.373956", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "f82505bae0f1", + "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1", + "container_image_name": "docker.io/ceph/ceph:v15", + "daemon_id": "1", + "daemon_type": "osd", + "version": "15.2.0", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.726134", + "created": "2020-04-02T20:35:17.142272", + "started": "2020-04-03T07:29:19.374002", + "is_active": false + }, + { + "hostname": "ceph-001", + "container_id": "2708d84cd484", + "container_image_id": "358a0d2395fe711bb8258e8fb4b2d7865c0a9a6463969bcd1452ee8869ea6653", + "container_image_name": "docker.io/prom/prometheus:latest", + "daemon_id": "ceph-001", + "daemon_type": "prometheus", + "version": "2.17.1", + "status": 1, + "status_desc": "running", + "last_refresh": "2020-04-03T15:31:48.726042", + "created": "2020-04-02T19:24:10.281163", + "started": "2020-04-03T07:29:16.926292", + "is_active": false + }, + { + "hostname": "ceph-001", + "daemon_id": "default-rgw-realm.eu-central-1.1.ceph-001.ytywjo", + "daemon_type": "rgw", + "status": 1, + "status_desc": "starting", + "is_active": false + } +]""") +) +def test_dd_octopus(dd_json): + # https://tracker.ceph.com/issues/44934 + # Those are real user data from early octopus. + # Please do not modify those JSON values. + + # Convert datetime properties to old style. 
+ # 2020-04-03T07:29:16.926292Z -> 2020-04-03T07:29:16.926292 + def convert_to_old_style_json(j): + for k in ['last_refresh', 'created', 'started', 'last_deployed', + 'last_configured']: + if k in j: + j[k] = j[k].rstrip('Z') + del j['daemon_name'] + return j + + assert dd_json == convert_to_old_style_json( + DaemonDescription.from_json(dd_json).to_json()) + + +@pytest.mark.parametrize("spec,dd,valid", +[ # noqa: E128 + # https://tracker.ceph.com/issues/44934 + ( + RGWSpec( + service_id="foo", + rgw_realm="default-rgw-realm", + rgw_zone="eu-central-1", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="foo.ceph-001.ytywjo", + hostname="ceph-001", + ), + True + ), + ( + # no realm + RGWSpec( + service_id="foo.bar", + rgw_zone="eu-central-1", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="foo.bar.ceph-001.ytywjo", + hostname="ceph-001", + ), + True + ), + ( + # no realm or zone + RGWSpec( + service_id="bar", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="bar.host.domain.tld.ytywjo", + hostname="host.domain.tld", + ), + True + ), + ( + # explicit naming + RGWSpec( + service_id="realm.zone", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="realm.zone.a", + hostname="smithi028", + ), + True + ), + ( + # without host + RGWSpec( + service_type='rgw', + service_id="foo", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="foo.hostname.ytywjo", + hostname=None, + ), + False + ), + ( + # without host (2) + RGWSpec( + service_type='rgw', + service_id="default-rgw-realm.eu-central-1.1", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="default-rgw-realm.eu-central-1.1.hostname.ytywjo", + hostname=None, + ), + False + ), + ( + # service_id contains hostname + # (sort of) https://tracker.ceph.com/issues/45294 + RGWSpec( + service_id="default.rgw.realm.ceph.001", + ), + DaemonDescription( + daemon_type='rgw', + daemon_id="default.rgw.realm.ceph.001.ceph.001.ytywjo", + hostname="ceph.001", + ), + True + ), + + # https://tracker.ceph.com/issues/45293 + ( + ServiceSpec( + service_type='mds', + service_id="a", + ), + DaemonDescription( + daemon_type='mds', + daemon_id="a.host1.abc123", + hostname="host1", + ), + True + ), + ( + # '.' char in service_id + ServiceSpec( + service_type='mds', + service_id="a.b.c", + ), + DaemonDescription( + daemon_type='mds', + daemon_id="a.b.c.host1.abc123", + hostname="host1", + ), + True + ), + + # https://tracker.ceph.com/issues/45617 + ( + # daemon_id does not contain hostname + ServiceSpec( + service_type='mds', + service_id="a", + ), + DaemonDescription( + daemon_type='mds', + daemon_id="a", + hostname="host1", + ), + True + ), + ( + # daemon_id only contains hostname + ServiceSpec( + service_type='mds', + service_id="host1", + ), + DaemonDescription( + daemon_type='mds', + daemon_id="host1", + hostname="host1", + ), + True + ), + + # https://tracker.ceph.com/issues/45399 + ( + # daemon_id only contains hostname + ServiceSpec( + service_type='mds', + service_id="a", + ), + DaemonDescription( + daemon_type='mds', + daemon_id="a.host1.abc123", + hostname="host1.site", + ), + True + ), + ( + NFSServiceSpec( + service_id="a", + ), + DaemonDescription( + daemon_type='nfs', + daemon_id="a.host1", + hostname="host1.site", + ), + True + ), + + # https://tracker.ceph.com/issues/45293 + ( + NFSServiceSpec( + service_id="a", + ), + DaemonDescription( + daemon_type='nfs', + daemon_id="a.host1", + hostname="host1", + ), + True + ), + ( + # service_id contains a '.' 
char
+        NFSServiceSpec(
+            service_id="a.b.c",
+        ),
+        DaemonDescription(
+            daemon_type='nfs',
+            daemon_id="a.b.c.host1",
+            hostname="host1",
+        ),
+        True
+    ),
+    (
+        # trailing chars after hostname
+        NFSServiceSpec(
+            service_id="a.b.c",
+        ),
+        DaemonDescription(
+            daemon_type='nfs',
+            daemon_id="a.b.c.host1.abc123",
+            hostname="host1",
+        ),
+        True
+    ),
+    (
+        # chars after hostname without '.'
+        NFSServiceSpec(
+            service_id="a",
+        ),
+        DaemonDescription(
+            daemon_type='nfs',
+            daemon_id="a.host1abc123",
+            hostname="host1",
+        ),
+        False
+    ),
+    (
+        # chars before hostname without '.'
+        NFSServiceSpec(
+            service_id="a",
+        ),
+        DaemonDescription(
+            daemon_type='nfs',
+            daemon_id="ahost1.abc123",
+            hostname="host1",
+        ),
+        False
+    ),
+
+    # https://tracker.ceph.com/issues/45293
+    (
+        IscsiServiceSpec(
+            service_type='iscsi',
+            service_id="a",
+        ),
+        DaemonDescription(
+            daemon_type='iscsi',
+            daemon_id="a.host1.abc123",
+            hostname="host1",
+        ),
+        True
+    ),
+    (
+        # '.' char in service_id
+        IscsiServiceSpec(
+            service_type='iscsi',
+            service_id="a.b.c",
+        ),
+        DaemonDescription(
+            daemon_type='iscsi',
+            daemon_id="a.b.c.host1.abc123",
+            hostname="host1",
+        ),
+        True
+    ),
+    (
+        # fixed daemon id for teuthology.
+        IscsiServiceSpec(
+            service_type='iscsi',
+            service_id='iscsi',
+        ),
+        DaemonDescription(
+            daemon_type='iscsi',
+            daemon_id="iscsi.a",
+            hostname="host1",
+        ),
+        True
+    ),
+
+    (
+        CustomContainerSpec(
+            service_type='container',
+            service_id='hello-world',
+            image='docker.io/library/hello-world:latest',
+        ),
+        DaemonDescription(
+            daemon_type='container',
+            daemon_id='hello-world.mgr0',
+            hostname='mgr0',
+        ),
+        True
+    ),
+
+])
+def test_daemon_description_service_name(spec: ServiceSpec,
+                                         dd: DaemonDescription,
+                                         valid: bool):
+    if valid:
+        assert spec.service_name() == dd.service_name()
+    else:
+        with pytest.raises(OrchestratorError):
+            dd.service_name()
diff --git a/src/pybind/mgr/cephadm/tests/test_ssh.py b/src/pybind/mgr/cephadm/tests/test_ssh.py
new file mode 100644
index 000000000..29f01b6c7
--- /dev/null
+++ b/src/pybind/mgr/cephadm/tests/test_ssh.py
@@ -0,0 +1,106 @@
+import asyncssh
+from asyncssh.process import SSHCompletedProcess
+from unittest import mock
+try:
+    # AsyncMock was not added until python 3.8
+    from unittest.mock import AsyncMock
+except ImportError:
+    try:
+        from asyncmock import AsyncMock
+    except ImportError:
+        AsyncMock = None
+import pytest
+
+
+try:
+    from asyncssh.misc import ConnectionLost
+except ImportError:
+    ConnectionLost = None
+
+from ceph.deployment.hostspec import HostSpec
+
+from cephadm import CephadmOrchestrator
+from cephadm.serve import CephadmServe
+from cephadm.tests.fixtures import with_host, wait, async_side_effect
+from orchestrator import OrchestratorError
+
+
+@pytest.mark.skipif(ConnectionLost is None, reason='no asyncssh')
+class TestWithSSH:
+    @mock.patch("cephadm.ssh.SSHManager._execute_command")
+    @mock.patch("cephadm.ssh.SSHManager._check_execute_command")
+    def test_offline(self, check_execute_command, execute_command, cephadm_module):
+        check_execute_command.side_effect = async_side_effect('')
+        execute_command.side_effect = async_side_effect(('', '', 0))
+
+        if not AsyncMock:
+            # can't run this test if we could not import AsyncMock
+            return
+        mock_connect = AsyncMock(return_value='')
+        with mock.patch("asyncssh.connect", new=mock_connect) as asyncssh_connect:
+            with with_host(cephadm_module, 'test'):
+                asyncssh_connect.side_effect = ConnectionLost('reason')
+                code, out, err = cephadm_module.check_host('test')
+                assert out == ''
+                assert
"Failed to connect to test at address (1::4)" in err + + out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json() + assert out == HostSpec('test', '1::4', status='Offline').to_json() + + asyncssh_connect.return_value = mock.MagicMock() + asyncssh_connect.side_effect = None + assert CephadmServe(cephadm_module)._check_host('test') is None + out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json() + assert out == HostSpec('test', '1::4').to_json() + + def test_ssh_remote_cmds_execution(self, cephadm_module): + + if not AsyncMock: + # can't run this test if we could not import AsyncMock + return + + class FakeConn: + def __init__(self, exception=None, returncode=0): + self.exception = exception + self.returncode = returncode + + async def run(self, *args, **kwargs): + if self.exception: + raise self.exception + else: + return SSHCompletedProcess(returncode=self.returncode, stdout="", stderr="") + + async def close(self): + pass + + def run_test(host, conn, expected_error): + mock_connect = AsyncMock(return_value=conn) + with pytest.raises(OrchestratorError, match=expected_error): + with mock.patch("asyncssh.connect", new=mock_connect): + with with_host(cephadm_module, host): + CephadmServe(cephadm_module)._check_host(host) + + # Test case 1: command failure + run_test('test1', FakeConn(returncode=1), "Command .+ failed") + + # Test case 2: connection error + run_test('test2', FakeConn(exception=asyncssh.ChannelOpenError(1, "", "")), "Unable to reach remote host test2.") + + # Test case 3: asyncssh ProcessError + stderr = "my-process-stderr" + run_test('test3', FakeConn(exception=asyncssh.ProcessError(returncode=3, + env="", + command="", + subsystem="", + exit_status="", + exit_signal="", + stderr=stderr, + stdout="")), f"Cannot execute the command.+{stderr}") + # Test case 4: generic error + run_test('test4', FakeConn(exception=Exception), "Generic error while executing command.+") + + +@pytest.mark.skipif(ConnectionLost is not None, reason='asyncssh') +class TestWithoutSSH: + def test_can_run(self, cephadm_module: CephadmOrchestrator): + assert cephadm_module.can_run() == (False, "loading asyncssh library:No module named 'asyncssh'") diff --git a/src/pybind/mgr/cephadm/tests/test_template.py b/src/pybind/mgr/cephadm/tests/test_template.py new file mode 100644 index 000000000..f67304348 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_template.py @@ -0,0 +1,33 @@ +import pathlib + +import pytest + +from cephadm.template import TemplateMgr, UndefinedError, TemplateNotFoundError + + +def test_render(cephadm_module, fs): + template_base = (pathlib.Path(__file__).parent / '../templates').resolve() + fake_template = template_base / 'foo/bar' + fs.create_file(fake_template, contents='{{ cephadm_managed }}{{ var }}') + + template_mgr = TemplateMgr(cephadm_module) + value = 'test' + + # with base context + expected_text = '{}{}'.format(template_mgr.base_context['cephadm_managed'], value) + assert template_mgr.render('foo/bar', {'var': value}) == expected_text + + # without base context + with pytest.raises(UndefinedError): + template_mgr.render('foo/bar', {'var': value}, managed_context=False) + + # override the base context + context = { + 'cephadm_managed': 'abc', + 'var': value + } + assert template_mgr.render('foo/bar', context) == 'abc{}'.format(value) + + # template not found + with pytest.raises(TemplateNotFoundError): + template_mgr.render('foo/bar/2', {}) diff --git a/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py 
b/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py new file mode 100644 index 000000000..66feaee31 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py @@ -0,0 +1,256 @@ +import pytest +import json +from tests import mock +from cephadm.tuned_profiles import TunedProfileUtils, SYSCTL_DIR +from cephadm.inventory import TunedProfileStore +from ceph.utils import datetime_now +from ceph.deployment.service_spec import TunedProfileSpec, PlacementSpec +from cephadm.ssh import SSHManager +from orchestrator import HostSpec + +from typing import List, Dict + + +class SaveError(Exception): + pass + + +class FakeCache: + def __init__(self, + hosts, + schedulable_hosts, + unreachable_hosts): + self.hosts = hosts + self.unreachable_hosts = [HostSpec(h) for h in unreachable_hosts] + self.schedulable_hosts = [HostSpec(h) for h in schedulable_hosts] + self.last_tuned_profile_update = {} + + def get_hosts(self): + return self.hosts + + def get_schedulable_hosts(self): + return self.schedulable_hosts + + def get_unreachable_hosts(self): + return self.unreachable_hosts + + def get_draining_hosts(self): + return [] + + def is_host_unreachable(self, hostname: str): + return hostname in [h.hostname for h in self.get_unreachable_hosts()] + + def is_host_schedulable(self, hostname: str): + return hostname in [h.hostname for h in self.get_schedulable_hosts()] + + def is_host_draining(self, hostname: str): + return hostname in [h.hostname for h in self.get_draining_hosts()] + + @property + def networks(self): + return {h: {'a': {'b': ['c']}} for h in self.hosts} + + def host_needs_tuned_profile_update(self, host, profile_name): + return profile_name == 'p2' + + +class FakeMgr: + def __init__(self, + hosts: List[str], + schedulable_hosts: List[str], + unreachable_hosts: List[str], + profiles: Dict[str, TunedProfileSpec]): + self.cache = FakeCache(hosts, schedulable_hosts, unreachable_hosts) + self.tuned_profiles = TunedProfileStore(self) + self.tuned_profiles.profiles = profiles + self.ssh = SSHManager(self) + self.offline_hosts = [] + self.log_refresh_metadata = False + + def set_store(self, what: str, value: str): + raise SaveError(f'{what}: {value}') + + def get_store(self, what: str): + if what == 'tuned_profiles': + return json.dumps({'x': TunedProfileSpec('x', + PlacementSpec(hosts=['x']), + {'x': 'x'}).to_json(), + 'y': TunedProfileSpec('y', + PlacementSpec(hosts=['y']), + {'y': 'y'}).to_json()}) + return '' + + +class TestTunedProfiles: + tspec1 = TunedProfileSpec('p1', + PlacementSpec(hosts=['a', 'b', 'c']), + {'setting1': 'value1', + 'setting2': 'value2', + 'setting with space': 'value with space'}) + tspec2 = TunedProfileSpec('p2', + PlacementSpec(hosts=['a', 'c']), + {'something': 'something_else', + 'high': '5'}) + tspec3 = TunedProfileSpec('p3', + PlacementSpec(hosts=['c']), + {'wow': 'wow2', + 'setting with space': 'value with space', + 'down': 'low'}) + + def profiles_to_calls(self, tp: TunedProfileUtils, profiles: List[TunedProfileSpec]) -> List[Dict[str, str]]: + # this function takes a list of tuned profiles and returns a mapping from + # profile names to the string that will be written to the actual config file on the host. 
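+        # (each list entry is a single-key dict, the same shape that
+        # TunedProfileUtils._write_tuned_profiles expects to receive)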
+        res = []
+        for p in profiles:
+            p_str = tp._profile_to_str(p)
+            res.append({p.profile_name: p_str})
+        return res
+
+    @mock.patch("cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles")
+    @mock.patch("cephadm.tuned_profiles.TunedProfileUtils._write_tuned_profiles")
+    def test_write_all_tuned_profiles(self, _write_profiles, _rm_profiles):
+        profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3}
+        mgr = FakeMgr(['a', 'b', 'c'],
+                      ['a', 'b', 'c'],
+                      [],
+                      profiles)
+        tp = TunedProfileUtils(mgr)
+        tp._write_all_tuned_profiles()
+        # need to check that _write_tuned_profiles is correctly called with the
+        # profiles that match the tuned profile placements and with the correct
+        # strings that should be generated from the settings the profiles have.
+        # the profiles_to_calls helper allows us to generate the input we
+        # should check against
+        calls = [
+            mock.call('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2])),
+            mock.call('b', self.profiles_to_calls(tp, [self.tspec1])),
+            mock.call('c', self.profiles_to_calls(tp, [self.tspec1, self.tspec2, self.tspec3]))
+        ]
+        _write_profiles.assert_has_calls(calls, any_order=True)
+
+    @mock.patch('cephadm.ssh.SSHManager.check_execute_command')
+    def test_rm_stray_tuned_profiles(self, _check_execute_command):
+        profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3}
+        # for this test, going to use host "a" and put 4 cephadm-generated
+        # profiles, "p1", "p2", "p3" and "who", only two of which should be there ("p1", "p2"),
+        # as well as a file not generated by cephadm. Only the "p3" and "who"
+        # profiles should be removed from the host. This should total 4
+        # calls to check_execute_command: 1 "ls", 2 "rm", and 1 "sysctl --system"
+        _check_execute_command.return_value = '\n'.join(['p1-cephadm-tuned-profile.conf',
+                                                         'p2-cephadm-tuned-profile.conf',
+                                                         'p3-cephadm-tuned-profile.conf',
+                                                         'who-cephadm-tuned-profile.conf',
+                                                         'dont-touch-me'])
+        mgr = FakeMgr(['a', 'b', 'c'],
+                      ['a', 'b', 'c'],
+                      [],
+                      profiles)
+        tp = TunedProfileUtils(mgr)
+        tp._remove_stray_tuned_profiles('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2]))
+        calls = [
+            mock.call('a', ['ls', SYSCTL_DIR], log_command=False),
+            mock.call('a', ['rm', '-f', f'{SYSCTL_DIR}/p3-cephadm-tuned-profile.conf']),
+            mock.call('a', ['rm', '-f', f'{SYSCTL_DIR}/who-cephadm-tuned-profile.conf']),
+            mock.call('a', ['sysctl', '--system'])
+        ]
+        _check_execute_command.assert_has_calls(calls, any_order=True)
+
+    @mock.patch('cephadm.ssh.SSHManager.check_execute_command')
+    @mock.patch('cephadm.ssh.SSHManager.write_remote_file')
+    def test_write_tuned_profiles(self, _write_remote_file, _check_execute_command):
+        profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3}
+        # for this test we will use host "a" and have it so host_needs_tuned_profile_update
+        # returns True for p2 and False for p1 (see FakeCache class).
So we should see + # 2 ssh calls, one to write p2, one to run sysctl --system + _check_execute_command.return_value = 'success' + _write_remote_file.return_value = 'success' + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + profiles) + tp = TunedProfileUtils(mgr) + tp._write_tuned_profiles('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2])) + _check_execute_command.assert_called_with('a', ['sysctl', '--system']) + _write_remote_file.assert_called_with( + 'a', f'{SYSCTL_DIR}/p2-cephadm-tuned-profile.conf', tp._profile_to_str(self.tspec2).encode('utf-8')) + + def test_dont_write_to_unreachable_hosts(self): + profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3} + + # list host "a" and "b" as hosts that exist, "a" will be + # a normal, schedulable host and "b" is considered unreachable + mgr = FakeMgr(['a', 'b'], + ['a'], + ['b'], + profiles) + tp = TunedProfileUtils(mgr) + + assert 'a' not in tp.mgr.cache.last_tuned_profile_update + assert 'b' not in tp.mgr.cache.last_tuned_profile_update + + # with an online host, should proceed as normal. Providing + # no actual profiles here though so the only actual action taken + # is updating the entry in the last_tuned_profile_update dict + tp._write_tuned_profiles('a', {}) + assert 'a' in tp.mgr.cache.last_tuned_profile_update + + # trying to write to an unreachable host should be a no-op + # and return immediately. No entry for 'b' should be added + # to the last_tuned_profile_update dict + tp._write_tuned_profiles('b', {}) + assert 'b' not in tp.mgr.cache.last_tuned_profile_update + + def test_store(self): + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + {}) + tps = TunedProfileStore(mgr) + save_str_p1 = 'tuned_profiles: ' + json.dumps({'p1': self.tspec1.to_json()}) + tspec1_updated = self.tspec1.copy() + tspec1_updated.settings.update({'new-setting': 'new-value'}) + save_str_p1_updated = 'tuned_profiles: ' + json.dumps({'p1': tspec1_updated.to_json()}) + save_str_p1_updated_p2 = 'tuned_profiles: ' + \ + json.dumps({'p1': tspec1_updated.to_json(), 'p2': self.tspec2.to_json()}) + tspec2_updated = self.tspec2.copy() + tspec2_updated.settings.pop('something') + save_str_p1_updated_p2_updated = 'tuned_profiles: ' + \ + json.dumps({'p1': tspec1_updated.to_json(), 'p2': tspec2_updated.to_json()}) + save_str_p2_updated = 'tuned_profiles: ' + json.dumps({'p2': tspec2_updated.to_json()}) + with pytest.raises(SaveError) as e: + tps.add_profile(self.tspec1) + assert str(e.value) == save_str_p1 + assert 'p1' in tps + with pytest.raises(SaveError) as e: + tps.add_setting('p1', 'new-setting', 'new-value') + assert str(e.value) == save_str_p1_updated + assert 'new-setting' in tps.list_profiles()[0].settings + with pytest.raises(SaveError) as e: + tps.add_profile(self.tspec2) + assert str(e.value) == save_str_p1_updated_p2 + assert 'p2' in tps + assert 'something' in tps.list_profiles()[1].settings + with pytest.raises(SaveError) as e: + tps.rm_setting('p2', 'something') + assert 'something' not in tps.list_profiles()[1].settings + assert str(e.value) == save_str_p1_updated_p2_updated + with pytest.raises(SaveError) as e: + tps.rm_profile('p1') + assert str(e.value) == save_str_p2_updated + assert 'p1' not in tps + assert 'p2' in tps + assert len(tps.list_profiles()) == 1 + assert tps.list_profiles()[0].profile_name == 'p2' + + cur_last_updated = tps.last_updated('p2') + new_last_updated = datetime_now() + assert cur_last_updated != new_last_updated + tps.set_last_updated('p2', new_last_updated) + assert 
tps.last_updated('p2') == new_last_updated + + # check FakeMgr get_store func to see what is expected to be found in Key Store here + tps.load() + assert 'x' in tps + assert 'y' in tps + assert [p for p in tps.list_profiles() if p.profile_name == 'x'][0].settings == {'x': 'x'} + assert [p for p in tps.list_profiles() if p.profile_name == 'y'][0].settings == {'y': 'y'} diff --git a/src/pybind/mgr/cephadm/tests/test_upgrade.py b/src/pybind/mgr/cephadm/tests/test_upgrade.py new file mode 100644 index 000000000..3b5c305b5 --- /dev/null +++ b/src/pybind/mgr/cephadm/tests/test_upgrade.py @@ -0,0 +1,481 @@ +import json +from unittest import mock + +import pytest + +from ceph.deployment.service_spec import PlacementSpec, ServiceSpec +from cephadm import CephadmOrchestrator +from cephadm.upgrade import CephadmUpgrade, UpgradeState +from cephadm.ssh import HostConnectionError +from cephadm.utils import ContainerInspectInfo +from orchestrator import OrchestratorError, DaemonDescription +from .fixtures import _run_cephadm, wait, with_host, with_service, \ + receive_agent_metadata, async_side_effect + +from typing import List, Tuple, Optional + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_upgrade_start(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_host(cephadm_module, 'test2'): + with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)), status_running=True): + assert wait(cephadm_module, cephadm_module.upgrade_start( + 'image_id', None)) == 'Initiating upgrade to image_id' + + assert wait(cephadm_module, cephadm_module.upgrade_status() + ).target_image == 'image_id' + + assert wait(cephadm_module, cephadm_module.upgrade_pause() + ) == 'Paused upgrade to image_id' + + assert wait(cephadm_module, cephadm_module.upgrade_resume() + ) == 'Resumed upgrade to image_id' + + assert wait(cephadm_module, cephadm_module.upgrade_stop() + ) == 'Stopped upgrade to image_id' + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_upgrade_start_offline_hosts(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_host(cephadm_module, 'test2'): + cephadm_module.offline_hosts = set(['test2']) + with pytest.raises(OrchestratorError, match=r"Upgrade aborted - Some host\(s\) are currently offline: {'test2'}"): + cephadm_module.upgrade_start('image_id', None) + cephadm_module.offline_hosts = set([]) # so remove_host doesn't fail when leaving the with_host block + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_upgrade_daemons_offline_hosts(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_host(cephadm_module, 'test2'): + cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0) + with mock.patch("cephadm.serve.CephadmServe._run_cephadm", side_effect=HostConnectionError('connection failure reason', 'test2', '192.168.122.1')): + _to_upgrade = [(DaemonDescription(daemon_type='crash', daemon_id='test2', hostname='test2'), True)] + with pytest.raises(HostConnectionError, match=r"connection failure reason"): + cephadm_module.upgrade._upgrade_daemons(_to_upgrade, 'target_image', ['digest1']) + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_do_upgrade_offline_hosts(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_host(cephadm_module, 'test2'): + 
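+            # seed an in-progress upgrade, then mark test2 offline so that
+            # _do_upgrade trips over the offline-host check below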
cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0) + cephadm_module.offline_hosts = set(['test2']) + with pytest.raises(HostConnectionError, match=r"Host\(s\) were marked offline: {'test2'}"): + cephadm_module.upgrade._do_upgrade() + cephadm_module.offline_hosts = set([]) # so remove_host doesn't fail when leaving the with_host block + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +@mock.patch("cephadm.module.CephadmOrchestrator.remove_health_warning") +def test_upgrade_resume_clear_health_warnings(_rm_health_warning, cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'test'): + with with_host(cephadm_module, 'test2'): + cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0, paused=True) + _rm_health_warning.return_value = None + assert wait(cephadm_module, cephadm_module.upgrade_resume() + ) == 'Resumed upgrade to target_image' + calls_list = [mock.call(alert_id) for alert_id in cephadm_module.upgrade.UPGRADE_ERRORS] + _rm_health_warning.assert_has_calls(calls_list, any_order=True) + + +@mock.patch('cephadm.upgrade.CephadmUpgrade._get_current_version', lambda _: (17, 2, 6)) +@mock.patch("cephadm.serve.CephadmServe._get_container_image_info") +def test_upgrade_check_with_ceph_version(_get_img_info, cephadm_module: CephadmOrchestrator): + # This test was added to avoid screwing up the image base so that + # when the version was added to it it made an incorrect image + # The issue caused the image to come out as + # quay.io/ceph/ceph:v18:v18.2.0 + # see https://tracker.ceph.com/issues/63150 + _img = '' + + def _fake_get_img_info(img_name): + nonlocal _img + _img = img_name + return ContainerInspectInfo( + 'image_id', + '18.2.0', + 'digest' + ) + + _get_img_info.side_effect = _fake_get_img_info + cephadm_module.upgrade_check('', '18.2.0') + assert _img == 'quay.io/ceph/ceph:v18.2.0' + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +@pytest.mark.parametrize("use_repo_digest", + [ + False, + True + ]) +def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + with with_host(cephadm_module, 'host2'): + cephadm_module.set_container_image('global', 'from_image') + cephadm_module.use_repo_digest = use_repo_digest + with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*', count=2)), + CephadmOrchestrator.apply_mgr, '', status_running=True), \ + mock.patch("cephadm.module.CephadmOrchestrator.lookup_release_name", + return_value='foo'), \ + mock.patch("cephadm.module.CephadmOrchestrator.version", + new_callable=mock.PropertyMock) as version_mock, \ + mock.patch("cephadm.module.CephadmOrchestrator.get", + return_value={ + # capture fields in both mon and osd maps + "require_osd_release": "pacific", + "min_mon_release": 16, + }): + version_mock.return_value = 'ceph version 18.2.1 (somehash)' + assert wait(cephadm_module, cephadm_module.upgrade_start( + 'to_image', None)) == 'Initiating upgrade to to_image' + + assert wait(cephadm_module, cephadm_module.upgrade_status() + ).target_image == 'to_image' + + def _versions_mock(cmd): + return json.dumps({ + 'mgr': { + 'ceph version 1.2.3 (asdf) blah': 1 + } + }) + + cephadm_module._mon_command_mock_versions = _versions_mock + + with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({ + 'image_id': 'image_id', + 'repo_digests': ['to_image@repo_digest'], + 'ceph_version': 'ceph version 18.2.3 (hash)', + }))): + + 
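+                    # the mocked _run_cephadm output above hands _do_upgrade the
+                    # image_id, repo digest and ceph_version it needs to resolve
+                    # the upgrade target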
cephadm_module.upgrade._do_upgrade() + + assert cephadm_module.upgrade_status is not None + + with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm( + json.dumps([ + dict( + name=list(cephadm_module.cache.daemons['host1'].keys())[0], + style='cephadm', + fsid='fsid', + container_id='container_id', + container_image_name='to_image', + container_image_id='image_id', + container_image_digests=['to_image@repo_digest'], + deployed_by=['to_image@repo_digest'], + version='version', + state='running', + ) + ]) + )): + receive_agent_metadata(cephadm_module, 'host1', ['ls']) + receive_agent_metadata(cephadm_module, 'host2', ['ls']) + + with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({ + 'image_id': 'image_id', + 'repo_digests': ['to_image@repo_digest'], + 'ceph_version': 'ceph version 18.2.3 (hash)', + }))): + cephadm_module.upgrade._do_upgrade() + + _, image, _ = cephadm_module.check_mon_command({ + 'prefix': 'config get', + 'who': 'global', + 'key': 'container_image', + }) + if use_repo_digest: + assert image == 'to_image@repo_digest' + else: + assert image == 'to_image' + + +def test_upgrade_state_null(cephadm_module: CephadmOrchestrator): + # This test validates https://tracker.ceph.com/issues/47580 + cephadm_module.set_store('upgrade_state', 'null') + CephadmUpgrade(cephadm_module) + assert CephadmUpgrade(cephadm_module).upgrade_state is None + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_not_enough_mgrs(cephadm_module: CephadmOrchestrator): + with with_host(cephadm_module, 'host1'): + with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=1)), CephadmOrchestrator.apply_mgr, ''): + with pytest.raises(OrchestratorError): + wait(cephadm_module, cephadm_module.upgrade_start('image_id', None)) + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +@mock.patch("cephadm.CephadmOrchestrator.check_mon_command") +def test_enough_mons_for_ok_to_stop(check_mon_command, cephadm_module: CephadmOrchestrator): + # only 2 monitors, not enough for ok-to-stop to ever pass + check_mon_command.return_value = ( + 0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}]}}', '') + assert not cephadm_module.upgrade._enough_mons_for_ok_to_stop() + + # 3 monitors, ok-to-stop should work fine + check_mon_command.return_value = ( + 0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}, {"name": "mon.3"}]}}', '') + assert cephadm_module.upgrade._enough_mons_for_ok_to_stop() + + +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +@mock.patch("cephadm.module.HostCache.get_daemons_by_service") +@mock.patch("cephadm.CephadmOrchestrator.get") +def test_enough_mds_for_ok_to_stop(get, get_daemons_by_service, cephadm_module: CephadmOrchestrator): + get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'test', 'max_mds': 1}}]}] + get_daemons_by_service.side_effect = [[DaemonDescription()]] + assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop( + DaemonDescription(daemon_type='mds', daemon_id='test.host1.gfknd', service_name='mds.test')) + + get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'myfs.test', 'max_mds': 2}}]}] + get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]] + assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop( + DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test')) + + get.side_effect = [{'filesystems': [{'mdsmap': 
{'fs_name': 'myfs.test', 'max_mds': 1}}]}] + get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]] + assert cephadm_module.upgrade._enough_mds_for_ok_to_stop( + DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test')) + + +@pytest.mark.parametrize("current_version, use_tags, show_all_versions, tags, result", + [ + # several candidate versions (from different major versions) + ( + (16, 1, '16.1.0'), + False, # use_tags + False, # show_all_versions + [ + 'v17.1.0', + 'v16.2.7', + 'v16.2.6', + 'v16.2.5', + 'v16.1.4', + 'v16.1.3', + 'v15.2.0', + ], + ['17.1.0', '16.2.7', '16.2.6', '16.2.5', '16.1.4', '16.1.3'] + ), + # candidate minor versions are available + ( + (16, 1, '16.1.0'), + False, # use_tags + False, # show_all_versions + [ + 'v16.2.2', + 'v16.2.1', + 'v16.1.6', + ], + ['16.2.2', '16.2.1', '16.1.6'] + ), + # all versions are less than the current version + ( + (17, 2, '17.2.0'), + False, # use_tags + False, # show_all_versions + [ + 'v17.1.0', + 'v16.2.7', + 'v16.2.6', + ], + [] + ), + # show all versions (regardless of the current version) + ( + (16, 1, '16.1.0'), + False, # use_tags + True, # show_all_versions + [ + 'v17.1.0', + 'v16.2.7', + 'v16.2.6', + 'v15.1.0', + 'v14.2.0', + ], + ['17.1.0', '16.2.7', '16.2.6', '15.1.0', '14.2.0'] + ), + # show all tags (regardless of the current version and show_all_versions flag) + ( + (16, 1, '16.1.0'), + True, # use_tags + False, # show_all_versions + [ + 'v17.1.0', + 'v16.2.7', + 'v16.2.6', + 'v16.2.5', + 'v16.1.4', + 'v16.1.3', + 'v15.2.0', + ], + ['v15.2.0', 'v16.1.3', 'v16.1.4', 'v16.2.5', + 'v16.2.6', 'v16.2.7', 'v17.1.0'] + ), + ]) +@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) +def test_upgrade_ls(current_version, use_tags, show_all_versions, tags, result, cephadm_module: CephadmOrchestrator): + with mock.patch('cephadm.upgrade.Registry.get_tags', return_value=tags): + with mock.patch('cephadm.upgrade.CephadmUpgrade._get_current_version', return_value=current_version): + out = cephadm_module.upgrade.upgrade_ls(None, use_tags, show_all_versions) + if use_tags: + assert out['tags'] == result + else: + assert out['versions'] == result + + +@pytest.mark.parametrize( + "upgraded, not_upgraded, daemon_types, hosts, services, should_block", + # [ ([(type, host, id), ... ], [...], [daemon types], [hosts], [services], True/False), ... 
] + [ + ( # valid, upgrade mgr daemons + [], + [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')], + ['mgr'], + None, + None, + False + ), + ( # invalid, can't upgrade mons until mgr is upgraded + [], + [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')], + ['mon'], + None, + None, + True + ), + ( # invalid, can't upgrade mon service until all mgr daemons are upgraded + [], + [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')], + None, + None, + ['mon'], + True + ), + ( # valid, upgrade mgr service + [], + [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')], + None, + None, + ['mgr'], + False + ), + ( # valid, mgr is already upgraded so can upgrade mons + [('mgr', 'a', 'a.x')], + [('mon', 'a', 'a')], + ['mon'], + None, + None, + False + ), + ( # invalid, can't upgrade all daemons on b b/c un-upgraded mgr on a + [], + [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + None, + ['a'], + None, + True + ), + ( # valid, only daemon on b is a mgr + [], + [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + None, + ['b'], + None, + False + ), + ( # invalid, can't upgrade mon on a while mgr on b is un-upgraded + [], + [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + None, + ['a'], + None, + True + ), + ( # valid, only upgrading the mgr on a + [], + [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + ['mgr'], + ['a'], + None, + False + ), + ( # valid, mgr daemon not on b are upgraded + [('mgr', 'a', 'a.x')], + [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + None, + ['b'], + None, + False + ), + ( # valid, all the necessary hosts are covered, mgr on c is already upgraded + [('mgr', 'c', 'c.z')], + [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a'), ('osd', 'c', '0')], + None, + ['a', 'b'], + None, + False + ), + ( # invalid, can't upgrade mon on a while mgr on b is un-upgraded + [], + [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')], + ['mgr', 'mon'], + ['a'], + None, + True + ), + ( # valid, only mon not on "b" is upgraded already. 
Case hit while making teuthology test + [('mon', 'a', 'a')], + [('mon', 'b', 'x'), ('mon', 'b', 'y'), ('osd', 'a', '1'), ('osd', 'b', '2')], + ['mon', 'osd'], + ['b'], + None, + False + ), + ] +) +@mock.patch("cephadm.module.HostCache.get_daemons") +@mock.patch("cephadm.serve.CephadmServe._get_container_image_info") +@mock.patch('cephadm.module.SpecStore.__getitem__') +def test_staggered_upgrade_validation( + get_spec, + get_image_info, + get_daemons, + upgraded: List[Tuple[str, str, str]], + not_upgraded: List[Tuple[str, str, str, str]], + daemon_types: Optional[str], + hosts: Optional[str], + services: Optional[str], + should_block: bool, + cephadm_module: CephadmOrchestrator, +): + def to_dds(ts: List[Tuple[str, str]], upgraded: bool) -> List[DaemonDescription]: + dds = [] + digest = 'new_image@repo_digest' if upgraded else 'old_image@repo_digest' + for t in ts: + dds.append(DaemonDescription(daemon_type=t[0], + hostname=t[1], + daemon_id=t[2], + container_image_digests=[digest], + deployed_by=[digest],)) + return dds + get_daemons.return_value = to_dds(upgraded, True) + to_dds(not_upgraded, False) + get_image_info.side_effect = async_side_effect( + ('new_id', 'ceph version 99.99.99 (hash)', ['new_image@repo_digest'])) + + class FakeSpecDesc(): + def __init__(self, spec): + self.spec = spec + + def _get_spec(s): + return FakeSpecDesc(ServiceSpec(s)) + + get_spec.side_effect = _get_spec + if should_block: + with pytest.raises(OrchestratorError): + cephadm_module.upgrade._validate_upgrade_filters( + 'new_image_name', daemon_types, hosts, services) + else: + cephadm_module.upgrade._validate_upgrade_filters( + 'new_image_name', daemon_types, hosts, services) diff --git a/src/pybind/mgr/cephadm/tuned_profiles.py b/src/pybind/mgr/cephadm/tuned_profiles.py new file mode 100644 index 000000000..8ec30bd53 --- /dev/null +++ b/src/pybind/mgr/cephadm/tuned_profiles.py @@ -0,0 +1,103 @@ +import logging +from typing import Dict, List, TYPE_CHECKING +from ceph.utils import datetime_now +from .schedule import HostAssignment +from ceph.deployment.service_spec import ServiceSpec, TunedProfileSpec + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + +logger = logging.getLogger(__name__) + +SYSCTL_DIR = '/etc/sysctl.d' + + +class TunedProfileUtils(): + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr = mgr + + def _profile_to_str(self, p: TunedProfileSpec) -> str: + p_str = f'# created by cephadm\n# tuned profile "{p.profile_name}"\n\n' + for k, v in p.settings.items(): + p_str += f'{k} = {v}\n' + return p_str + + def _write_all_tuned_profiles(self) -> None: + host_profile_mapping: Dict[str, List[Dict[str, str]]] = {} + for host in self.mgr.cache.get_hosts(): + host_profile_mapping[host] = [] + + for profile in self.mgr.tuned_profiles.list_profiles(): + p_str = self._profile_to_str(profile) + ha = HostAssignment( + spec=ServiceSpec( + 'crash', placement=profile.placement), + hosts=self.mgr.cache.get_schedulable_hosts(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), + daemons=[], + networks=self.mgr.cache.networks, + ) + all_slots, _, _ = ha.place() + for host in {s.hostname for s in all_slots}: + host_profile_mapping[host].append({profile.profile_name: p_str}) + + for host, profiles in host_profile_mapping.items(): + self._remove_stray_tuned_profiles(host, profiles) + self._write_tuned_profiles(host, profiles) + + def _remove_stray_tuned_profiles(self, host: str, profiles: List[Dict[str, str]]) -> None: 
+ """ + this function looks at the contents of /etc/sysctl.d/ for profiles we have written + that should now be removed. It assumes any file with "-cephadm-tuned-profile.conf" in + it is written by us any without that are not. Only files written by us are considered + candidates for removal. The "profiles" parameter is a list of dictionaries that map + profile names to the file contents to actually be written to the + /etc/sysctl.d/-cephadm-tuned-profile.conf. For example + [ + { + 'profile1': 'setting1: value1\nsetting2: value2' + }, + { + 'profile2': 'setting3: value3' + } + ] + what we want to end up doing is going through the keys of the dicts and appending + -cephadm-tuned-profile.conf to the profile names to build our list of profile files that + SHOULD be on the host. Then if we see any file names that don't match this, but + DO include "-cephadm-tuned-profile.conf" (implying they're from us), remove them. + """ + if self.mgr.cache.is_host_unreachable(host): + return + cmd = ['ls', SYSCTL_DIR] + found_files = self.mgr.ssh.check_execute_command(host, cmd, log_command=self.mgr.log_refresh_metadata).split('\n') + found_files = [s.strip() for s in found_files] + profile_names: List[str] = sum([[*p] for p in profiles], []) # extract all profiles names + profile_names = list(set(profile_names)) # remove duplicates + expected_files = [p + '-cephadm-tuned-profile.conf' for p in profile_names] + updated = False + for file in found_files: + if '-cephadm-tuned-profile.conf' not in file: + continue + if file not in expected_files: + logger.info(f'Removing stray tuned profile file {file}') + cmd = ['rm', '-f', f'{SYSCTL_DIR}/{file}'] + self.mgr.ssh.check_execute_command(host, cmd) + updated = True + if updated: + self.mgr.ssh.check_execute_command(host, ['sysctl', '--system']) + + def _write_tuned_profiles(self, host: str, profiles: List[Dict[str, str]]) -> None: + if self.mgr.cache.is_host_unreachable(host): + return + updated = False + for p in profiles: + for profile_name, content in p.items(): + if self.mgr.cache.host_needs_tuned_profile_update(host, profile_name): + logger.info(f'Writing tuned profile {profile_name} to host {host}') + profile_filename: str = f'{SYSCTL_DIR}/{profile_name}-cephadm-tuned-profile.conf' + self.mgr.ssh.write_remote_file(host, profile_filename, content.encode('utf-8')) + updated = True + if updated: + self.mgr.ssh.check_execute_command(host, ['sysctl', '--system']) + self.mgr.cache.last_tuned_profile_update[host] = datetime_now() diff --git a/src/pybind/mgr/cephadm/upgrade.py b/src/pybind/mgr/cephadm/upgrade.py new file mode 100644 index 000000000..eeae37580 --- /dev/null +++ b/src/pybind/mgr/cephadm/upgrade.py @@ -0,0 +1,1294 @@ +import json +import logging +import time +import uuid +from typing import TYPE_CHECKING, Optional, Dict, List, Tuple, Any, cast + +import orchestrator +from cephadm.registry import Registry +from cephadm.serve import CephadmServe +from cephadm.services.cephadmservice import CephadmDaemonDeploySpec +from cephadm.utils import ceph_release_to_major, name_to_config_section, CEPH_UPGRADE_ORDER, \ + CEPH_TYPES, NON_CEPH_IMAGE_TYPES, GATEWAY_TYPES +from cephadm.ssh import HostConnectionError +from orchestrator import OrchestratorError, DaemonDescription, DaemonDescriptionStatus, daemon_type_to_service + +if TYPE_CHECKING: + from .module import CephadmOrchestrator + + +logger = logging.getLogger(__name__) + +# from ceph_fs.h +CEPH_MDSMAP_ALLOW_STANDBY_REPLAY = (1 << 5) +CEPH_MDSMAP_NOT_JOINABLE = (1 << 0) + + +def normalize_image_digest(digest: 
str, default_registry: str) -> str: + """ + Normal case: + >>> normalize_image_digest('ceph/ceph', 'docker.io') + 'docker.io/ceph/ceph' + + No change: + >>> normalize_image_digest('quay.ceph.io/ceph/ceph', 'docker.io') + 'quay.ceph.io/ceph/ceph' + + >>> normalize_image_digest('docker.io/ubuntu', 'docker.io') + 'docker.io/ubuntu' + + >>> normalize_image_digest('localhost/ceph', 'docker.io') + 'localhost/ceph' + """ + known_shortnames = [ + 'ceph/ceph', + 'ceph/daemon', + 'ceph/daemon-base', + ] + for image in known_shortnames: + if digest.startswith(image): + return f'{default_registry}/{digest}' + return digest + + +class UpgradeState: + def __init__(self, + target_name: str, + progress_id: str, + target_id: Optional[str] = None, + target_digests: Optional[List[str]] = None, + target_version: Optional[str] = None, + error: Optional[str] = None, + paused: Optional[bool] = None, + fail_fs: bool = False, + fs_original_max_mds: Optional[Dict[str, int]] = None, + fs_original_allow_standby_replay: Optional[Dict[str, bool]] = None, + daemon_types: Optional[List[str]] = None, + hosts: Optional[List[str]] = None, + services: Optional[List[str]] = None, + total_count: Optional[int] = None, + remaining_count: Optional[int] = None, + ): + self._target_name: str = target_name # Use CephadmUpgrade.target_image instead. + self.progress_id: str = progress_id + self.target_id: Optional[str] = target_id + self.target_digests: Optional[List[str]] = target_digests + self.target_version: Optional[str] = target_version + self.error: Optional[str] = error + self.paused: bool = paused or False + self.fs_original_max_mds: Optional[Dict[str, int]] = fs_original_max_mds + self.fs_original_allow_standby_replay: Optional[Dict[str, + bool]] = fs_original_allow_standby_replay + self.fail_fs = fail_fs + self.daemon_types = daemon_types + self.hosts = hosts + self.services = services + self.total_count = total_count + self.remaining_count = remaining_count + + def to_json(self) -> dict: + return { + 'target_name': self._target_name, + 'progress_id': self.progress_id, + 'target_id': self.target_id, + 'target_digests': self.target_digests, + 'target_version': self.target_version, + 'fail_fs': self.fail_fs, + 'fs_original_max_mds': self.fs_original_max_mds, + 'fs_original_allow_standby_replay': self.fs_original_allow_standby_replay, + 'error': self.error, + 'paused': self.paused, + 'daemon_types': self.daemon_types, + 'hosts': self.hosts, + 'services': self.services, + 'total_count': self.total_count, + 'remaining_count': self.remaining_count, + } + + @classmethod + def from_json(cls, data: dict) -> Optional['UpgradeState']: + valid_params = UpgradeState.__init__.__code__.co_varnames + if data: + c = {k: v for k, v in data.items() if k in valid_params} + if 'repo_digest' in c: + c['target_digests'] = [c.pop('repo_digest')] + return cls(**c) + else: + return None + + +class CephadmUpgrade: + UPGRADE_ERRORS = [ + 'UPGRADE_NO_STANDBY_MGR', + 'UPGRADE_FAILED_PULL', + 'UPGRADE_REDEPLOY_DAEMON', + 'UPGRADE_BAD_TARGET_VERSION', + 'UPGRADE_EXCEPTION', + 'UPGRADE_OFFLINE_HOST' + ] + + def __init__(self, mgr: "CephadmOrchestrator"): + self.mgr = mgr + + t = self.mgr.get_store('upgrade_state') + if t: + self.upgrade_state: Optional[UpgradeState] = UpgradeState.from_json(json.loads(t)) + else: + self.upgrade_state = None + self.upgrade_info_str: str = '' + + @property + def target_image(self) -> str: + assert self.upgrade_state + if not self.mgr.use_repo_digest: + return self.upgrade_state._target_name + if not 
self.upgrade_state.target_digests:
+            return self.upgrade_state._target_name
+
+        # FIXME: we assume the first digest is the best one to use
+        return self.upgrade_state.target_digests[0]
+
+    def upgrade_status(self) -> orchestrator.UpgradeStatusSpec:
+        r = orchestrator.UpgradeStatusSpec()
+        if self.upgrade_state:
+            r.target_image = self.target_image
+            r.in_progress = True
+            r.progress, r.services_complete = self._get_upgrade_info()
+            r.is_paused = self.upgrade_state.paused
+
+            if self.upgrade_state.daemon_types is not None:
+                which_str = f'Upgrading daemons of type(s) {",".join(self.upgrade_state.daemon_types)}'
+                if self.upgrade_state.hosts is not None:
+                    which_str += f' on host(s) {",".join(self.upgrade_state.hosts)}'
+            elif self.upgrade_state.services is not None:
+                which_str = f'Upgrading daemons in service(s) {",".join(self.upgrade_state.services)}'
+                if self.upgrade_state.hosts is not None:
+                    which_str += f' on host(s) {",".join(self.upgrade_state.hosts)}'
+            elif self.upgrade_state.hosts is not None:
+                which_str = f'Upgrading all daemons on host(s) {",".join(self.upgrade_state.hosts)}'
+            else:
+                which_str = 'Upgrading all daemon types on all hosts'
+            if self.upgrade_state.total_count is not None and self.upgrade_state.remaining_count is not None:
+                which_str += f'. Upgrade limited to {self.upgrade_state.total_count} daemons ({self.upgrade_state.remaining_count} remaining).'
+            r.which = which_str
+
+            # accessing self.upgrade_info_str will throw an exception if it
+            # has not been set in _do_upgrade yet
+            try:
+                r.message = self.upgrade_info_str
+            except AttributeError:
+                pass
+            if self.upgrade_state.error:
+                r.message = 'Error: ' + self.upgrade_state.error
+            elif self.upgrade_state.paused:
+                r.message = 'Upgrade paused'
+        return r
+
+    def _get_upgrade_info(self) -> Tuple[str, List[str]]:
+        if not self.upgrade_state or not self.upgrade_state.target_digests:
+            return '', []
+
+        daemons = self._get_filtered_daemons()
+
+        if any(not d.container_image_digests for d in daemons if d.daemon_type == 'mgr'):
+            return '', []
+
+        completed_daemons = [(d.daemon_type, any(d in self.upgrade_state.target_digests for d in (
+            d.container_image_digests or []))) for d in daemons if d.daemon_type]
+
+        done = len([True for completion in completed_daemons if completion[1]])
+
+        completed_types = list(set([completion[0] for completion in completed_daemons if all(
+            c[1] for c in completed_daemons if c[0] == completion[0])]))
+
+        return '%s/%s daemons upgraded' % (done, len(daemons)), completed_types
+
+    def _get_filtered_daemons(self) -> List[DaemonDescription]:
+        # Return the set of daemons to be upgraded with our current
+        # filtering parameters (or all daemons in upgrade order if no
+        # filtering parameters are set).
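+        # Illustrative sketch (values assumed, not from a real cluster): with
+        # daemon_types == ['osd'] and hosts == ['host1'], a cache holding
+        # mgr.x(host1), osd.0(host1), osd.1(host2) and mds.a(host1) narrows
+        # to [osd.0] -- first filtered by type, then by host.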
+        assert self.upgrade_state is not None
+        if self.upgrade_state.daemon_types is not None:
+            daemons = [d for d in self.mgr.cache.get_daemons(
+            ) if d.daemon_type in self.upgrade_state.daemon_types]
+        elif self.upgrade_state.services is not None:
+            daemons = []
+            for service in self.upgrade_state.services:
+                daemons += self.mgr.cache.get_daemons_by_service(service)
+        else:
+            daemons = [d for d in self.mgr.cache.get_daemons(
+            ) if d.daemon_type in CEPH_UPGRADE_ORDER]
+        if self.upgrade_state.hosts is not None:
+            daemons = [d for d in daemons if d.hostname in self.upgrade_state.hosts]
+        return daemons
+
+    def _get_current_version(self) -> Tuple[int, int, str]:
+        current_version = self.mgr.version.split('ceph version ')[1]
+        (current_major, current_minor, _) = current_version.split('-')[0].split('.', 2)
+        return (int(current_major), int(current_minor), current_version)
+
+    def _check_target_version(self, version: str) -> Optional[str]:
+        try:
+            v = version.split('.', 2)
+            (major, minor) = (int(v[0]), int(v[1]))
+            assert minor >= 0
+            # patch might be a number or {number}-g{sha1}
+        except ValueError:
+            return 'version must be in the form X.Y.Z (e.g., 15.2.3)'
+        if major < 15 or (major == 15 and minor < 2):
+            return 'cephadm only supports octopus (15.2.0) or later'
+
+        # too far a jump?
+        (current_major, current_minor, current_version) = self._get_current_version()
+        if current_major < major - 2:
+            return f'ceph can only upgrade 1 or 2 major versions at a time; {current_version} -> {version} is too big a jump'
+        if current_major > major:
+            return f'ceph cannot downgrade major versions (from {current_version} to {version})'
+        if current_major == major:
+            if current_minor > minor:
+                return f'ceph cannot downgrade to a {"rc" if minor == 1 else "dev"} release'
+
+        # check mon min
+        monmap = self.mgr.get("mon_map")
+        mon_min = monmap.get("min_mon_release", 0)
+        if mon_min < major - 2:
+            return f'min_mon_release ({mon_min}) < target {major} - 2; first complete an upgrade to an earlier release'
+
+        # check osd min
+        osdmap = self.mgr.get("osd_map")
+        osd_min_name = osdmap.get("require_osd_release", "argonaut")
+        osd_min = ceph_release_to_major(osd_min_name)
+        if osd_min < major - 2:
+            return f'require_osd_release ({osd_min_name} or {osd_min}) < target {major} - 2; first complete an upgrade to an earlier release'
+
+        return None
+
+    def upgrade_ls(self, image: Optional[str], tags: bool, show_all_versions: Optional[bool]) -> Dict:
+        if not image:
+            image = self.mgr.container_image_base
+        reg_name, bare_image = image.split('/', 1)
+        if ':' in bare_image:
+            # for our purposes, we don't want to use the tag here
+            bare_image = bare_image.split(':')[0]
+        reg = Registry(reg_name)
+        (current_major, current_minor, _) = self._get_current_version()
+        versions = []
+        r: Dict[Any, Any] = {
+            "image": image,
+            "registry": reg_name,
+            "bare_image": bare_image,
+        }
+
+        try:
+            ls = reg.get_tags(bare_image)
+        except ValueError as e:
+            raise OrchestratorError(f'{e}')
+        if not tags:
+            for t in ls:
+                if t[0] != 'v':
+                    continue
+                v = t[1:].split('.')
+                if len(v) != 3:
+                    continue
+                if '-' in v[2]:
+                    continue
+                v_major = int(v[0])
+                v_minor = int(v[1])
+                candidate_version = (v_major > current_major
+                                     or (v_major == current_major and v_minor >= current_minor))
+                if show_all_versions or candidate_version:
+                    versions.append('.'.join(v))
+            r["versions"] = sorted(
+                versions,
+                key=lambda k: list(map(int, k.split('.'))),
+                reverse=True
+            )
+        else:
+            r["tags"] = sorted(ls)
+        return r
+
+    def upgrade_start(self, image: str, version: 
str, daemon_types: Optional[List[str]] = None, + hosts: Optional[List[str]] = None, services: Optional[List[str]] = None, limit: Optional[int] = None) -> str: + fail_fs_value = cast(bool, self.mgr.get_module_option_ex( + 'orchestrator', 'fail_fs', False)) + if self.mgr.mode != 'root': + raise OrchestratorError('upgrade is not supported in %s mode' % ( + self.mgr.mode)) + if version: + version_error = self._check_target_version(version) + if version_error: + raise OrchestratorError(version_error) + target_name = self.mgr.container_image_base + ':v' + version + elif image: + target_name = normalize_image_digest(image, self.mgr.default_registry) + else: + raise OrchestratorError('must specify either image or version') + + if daemon_types is not None or services is not None or hosts is not None: + self._validate_upgrade_filters(target_name, daemon_types, hosts, services) + + if self.upgrade_state: + if self.upgrade_state._target_name != target_name: + raise OrchestratorError( + 'Upgrade to %s (not %s) already in progress' % + (self.upgrade_state._target_name, target_name)) + if self.upgrade_state.paused: + self.upgrade_state.paused = False + self._save_upgrade_state() + return 'Resumed upgrade to %s' % self.target_image + return 'Upgrade to %s in progress' % self.target_image + + running_mgr_count = len([daemon for daemon in self.mgr.cache.get_daemons_by_type( + 'mgr') if daemon.status == DaemonDescriptionStatus.running]) + + if running_mgr_count < 2: + raise OrchestratorError('Need at least 2 running mgr daemons for upgrade') + + self.mgr.log.info('Upgrade: Started with target %s' % target_name) + self.upgrade_state = UpgradeState( + target_name=target_name, + progress_id=str(uuid.uuid4()), + fail_fs=fail_fs_value, + daemon_types=daemon_types, + hosts=hosts, + services=services, + total_count=limit, + remaining_count=limit, + ) + self._update_upgrade_progress(0.0) + self._save_upgrade_state() + self._clear_upgrade_health_checks() + self.mgr.event.set() + return 'Initiating upgrade to %s' % (target_name) + + def _validate_upgrade_filters(self, target_name: str, daemon_types: Optional[List[str]] = None, hosts: Optional[List[str]] = None, services: Optional[List[str]] = None) -> None: + def _latest_type(dtypes: List[str]) -> str: + # [::-1] gives the list in reverse + for daemon_type in CEPH_UPGRADE_ORDER[::-1]: + if daemon_type in dtypes: + return daemon_type + return '' + + def _get_earlier_daemons(dtypes: List[str], candidates: List[DaemonDescription]) -> List[DaemonDescription]: + # this function takes a list of daemon types and first finds the daemon + # type from that list that is latest in our upgrade order. Then, from + # that latest type, it filters the list of candidate daemons received + # for daemons with types earlier in the upgrade order than the latest + # type found earlier. That filtered list of daemons is returned. The + # purpose of this function is to help in finding daemons that must have + # already been upgraded for the given filtering parameters (--daemon-types, + # --services, --hosts) to be valid. 
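+            # Worked example with a hypothetical (shortened) upgrade order, for
+            # illustration only: if CEPH_UPGRADE_ORDER were
+            # ['mgr', 'mon', 'osd', 'mds'] and dtypes == ['osd'], then
+            # latest == 'osd', earlier_types == ['mgr', 'mon'], and only the
+            # mgr/mon candidates are returned as daemons that must already be
+            # upgraded.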
+            latest = _latest_type(dtypes)
+            if not latest:
+                return []
+            earlier_types = '|'.join(CEPH_UPGRADE_ORDER).split(latest)[0].split('|')[:-1]
+            earlier_types = [t for t in earlier_types if t not in dtypes]
+            return [d for d in candidates if d.daemon_type in earlier_types]
+
+        if self.upgrade_state:
+            raise OrchestratorError(
+                'Cannot set values for --daemon-types, --services or --hosts when upgrade already in progress.')
+        try:
+            with self.mgr.async_timeout_handler('cephadm inspect-image'):
+                target_id, target_version, target_digests = self.mgr.wait_async(
+                    CephadmServe(self.mgr)._get_container_image_info(target_name))
+        except OrchestratorError as e:
+            raise OrchestratorError(f'Failed to pull {target_name}: {str(e)}')
+        # What we need to do here is build a list of daemons that must already be upgraded
+        # in order for the user's selection of daemons to upgrade to be valid. For example,
+        # if they say --daemon-types 'osd,mds' but mons have not been upgraded, we block.
+        daemons = [d for d in self.mgr.cache.get_daemons(
+        ) if d.daemon_type not in NON_CEPH_IMAGE_TYPES]
+        err_msg_base = 'Cannot start upgrade. '
+        # "dtypes" will later be filled in with the types of daemons that will be upgraded with the given parameters
+        dtypes = []
+        if daemon_types is not None:
+            dtypes = daemon_types
+            if hosts is not None:
+                dtypes = [_latest_type(dtypes)]
+                other_host_daemons = [
+                    d for d in daemons if d.hostname is not None and d.hostname not in hosts]
+                daemons = _get_earlier_daemons(dtypes, other_host_daemons)
+            else:
+                daemons = _get_earlier_daemons(dtypes, daemons)
+            err_msg_base += 'Daemons with types earlier in upgrade order than given types need upgrading.\n'
+        elif services is not None:
+            # for our purposes here we can effectively convert our list of services into the
+            # set of daemon types the services contain. This works because we don't allow --services
+            # and --daemon-types at the same time and we only allow services of the same type
+            sspecs = [
+                self.mgr.spec_store[s].spec for s in services if self.mgr.spec_store[s].spec is not None]
+            stypes = list(set([s.service_type for s in sspecs]))
+            if len(stypes) != 1:
+                raise OrchestratorError('Doing upgrade by service only supports services of one type at '
+                                        f'a time. Found service types: {stypes}')
+            for stype in stypes:
+                dtypes += orchestrator.service_to_daemon_types(stype)
+            dtypes = list(set(dtypes))
+            if hosts is not None:
+                other_host_daemons = [
+                    d for d in daemons if d.hostname is not None and d.hostname not in hosts]
+                daemons = _get_earlier_daemons(dtypes, other_host_daemons)
+            else:
+                daemons = _get_earlier_daemons(dtypes, daemons)
+            err_msg_base += 'Daemons with types earlier in upgrade order than daemons from given services need upgrading.\n'
+        elif hosts is not None:
+            # hosts must be handled a bit differently. For this, we really need to find all the daemon types
+            # that reside on hosts in the list of hosts we will upgrade. Then take the type from
+            # that list that is latest in the upgrade order and check if any daemons on hosts not in the
+            # provided list of hosts have a daemon with a type earlier in the upgrade order that is not upgraded.
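+            # e.g. (assumed scenario): with hosts == ['host1'] where host1 runs
+            # mons and osds, dtypes becomes ['mon', 'osd'] and we check whether
+            # any daemon of an earlier type (such as mgr or mon) on *other*
+            # hosts still needs the upgrade; if so, the upgrade is refused.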
+ dtypes = list( + set([d.daemon_type for d in daemons if d.daemon_type is not None and d.hostname in hosts])) + other_hosts_daemons = [ + d for d in daemons if d.hostname is not None and d.hostname not in hosts] + daemons = _get_earlier_daemons([_latest_type(dtypes)], other_hosts_daemons) + err_msg_base += 'Daemons with types earlier in upgrade order than daemons on given host need upgrading.\n' + need_upgrade_self, n1, n2, _ = self._detect_need_upgrade(daemons, target_digests, target_name) + if need_upgrade_self and ('mgr' not in dtypes or (daemon_types is None and services is None)): + # also report active mgr as needing to be upgraded. It is not included in the resulting list + # by default as it is treated special and handled via the need_upgrade_self bool + n1.insert(0, (self.mgr.mgr_service.get_active_daemon( + self.mgr.cache.get_daemons_by_type('mgr')), True)) + if n1 or n2: + raise OrchestratorError(f'{err_msg_base}Please first upgrade ' + f'{", ".join(list(set([d[0].name() for d in n1] + [d[0].name() for d in n2])))}\n' + f'NOTE: Enforced upgrade order is: {" -> ".join(CEPH_TYPES + GATEWAY_TYPES)}') + + def upgrade_pause(self) -> str: + if not self.upgrade_state: + raise OrchestratorError('No upgrade in progress') + if self.upgrade_state.paused: + return 'Upgrade to %s already paused' % self.target_image + self.upgrade_state.paused = True + self.mgr.log.info('Upgrade: Paused upgrade to %s' % self.target_image) + self._save_upgrade_state() + return 'Paused upgrade to %s' % self.target_image + + def upgrade_resume(self) -> str: + if not self.upgrade_state: + raise OrchestratorError('No upgrade in progress') + if not self.upgrade_state.paused: + return 'Upgrade to %s not paused' % self.target_image + self.upgrade_state.paused = False + self.upgrade_state.error = '' + self.mgr.log.info('Upgrade: Resumed upgrade to %s' % self.target_image) + self._save_upgrade_state() + self.mgr.event.set() + for alert_id in self.UPGRADE_ERRORS: + self.mgr.remove_health_warning(alert_id) + return 'Resumed upgrade to %s' % self.target_image + + def upgrade_stop(self) -> str: + if not self.upgrade_state: + return 'No upgrade in progress' + if self.upgrade_state.progress_id: + self.mgr.remote('progress', 'complete', + self.upgrade_state.progress_id) + target_image = self.target_image + self.mgr.log.info('Upgrade: Stopped') + self.upgrade_state = None + self._save_upgrade_state() + self._clear_upgrade_health_checks() + self.mgr.event.set() + return 'Stopped upgrade to %s' % target_image + + def continue_upgrade(self) -> bool: + """ + Returns false, if nothing was done. + :return: + """ + if self.upgrade_state and not self.upgrade_state.paused: + try: + self._do_upgrade() + except HostConnectionError as e: + self._fail_upgrade('UPGRADE_OFFLINE_HOST', { + 'severity': 'error', + 'summary': f'Upgrade: Failed to connect to host {e.hostname} at addr ({e.addr})', + 'count': 1, + 'detail': [f'SSH connection failed to {e.hostname} at addr ({e.addr}): {str(e)}'], + }) + return False + except Exception as e: + self._fail_upgrade('UPGRADE_EXCEPTION', { + 'severity': 'error', + 'summary': 'Upgrade: failed due to an unexpected exception', + 'count': 1, + 'detail': [f'Unexpected exception occurred during upgrade process: {str(e)}'], + }) + return False + return True + return False + + def _wait_for_ok_to_stop( + self, s: DaemonDescription, + known: Optional[List[str]] = None, # NOTE: output argument! 
+ ) -> bool: + # only wait a little bit; the service might go away for something + assert s.daemon_type is not None + assert s.daemon_id is not None + tries = 4 + while tries > 0: + if not self.upgrade_state or self.upgrade_state.paused: + return False + + # setting force flag to retain old functionality. + # note that known is an output argument for ok_to_stop() + r = self.mgr.cephadm_services[daemon_type_to_service(s.daemon_type)].ok_to_stop([ + s.daemon_id], known=known, force=True) + + if not r.retval: + logger.info(f'Upgrade: {r.stdout}') + return True + logger.info(f'Upgrade: {r.stderr}') + + time.sleep(15) + tries -= 1 + return False + + def _clear_upgrade_health_checks(self) -> None: + for k in self.UPGRADE_ERRORS: + if k in self.mgr.health_checks: + del self.mgr.health_checks[k] + self.mgr.set_health_checks(self.mgr.health_checks) + + def _fail_upgrade(self, alert_id: str, alert: dict) -> None: + assert alert_id in self.UPGRADE_ERRORS + if not self.upgrade_state: + # this could happen if the user canceled the upgrade while we + # were doing something + return + + logger.error('Upgrade: Paused due to %s: %s' % (alert_id, + alert['summary'])) + self.upgrade_state.error = alert_id + ': ' + alert['summary'] + self.upgrade_state.paused = True + self._save_upgrade_state() + self.mgr.health_checks[alert_id] = alert + self.mgr.set_health_checks(self.mgr.health_checks) + + def _update_upgrade_progress(self, progress: float) -> None: + if not self.upgrade_state: + assert False, 'No upgrade in progress' + + if not self.upgrade_state.progress_id: + self.upgrade_state.progress_id = str(uuid.uuid4()) + self._save_upgrade_state() + self.mgr.remote('progress', 'update', self.upgrade_state.progress_id, + ev_msg='Upgrade to %s' % ( + self.upgrade_state.target_version or self.target_image + ), + ev_progress=progress, + add_to_ceph_s=True) + + def _save_upgrade_state(self) -> None: + if not self.upgrade_state: + self.mgr.set_store('upgrade_state', None) + return + self.mgr.set_store('upgrade_state', json.dumps(self.upgrade_state.to_json())) + + def get_distinct_container_image_settings(self) -> Dict[str, str]: + # get all distinct container_image settings + image_settings = {} + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'config dump', + 'format': 'json', + }) + config = json.loads(out) + for opt in config: + if opt['name'] == 'container_image': + image_settings[opt['section']] = opt['value'] + return image_settings + + def _prepare_for_mds_upgrade( + self, + target_major: str, + need_upgrade: List[DaemonDescription] + ) -> bool: + # scale down all filesystems to 1 MDS + assert self.upgrade_state + if not self.upgrade_state.fs_original_max_mds: + self.upgrade_state.fs_original_max_mds = {} + if not self.upgrade_state.fs_original_allow_standby_replay: + self.upgrade_state.fs_original_allow_standby_replay = {} + fsmap = self.mgr.get("fs_map") + continue_upgrade = True + for fs in fsmap.get('filesystems', []): + fscid = fs["id"] + mdsmap = fs["mdsmap"] + fs_name = mdsmap["fs_name"] + + # disable allow_standby_replay? 
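+            # Bit-flag sketch: CEPH_MDSMAP_ALLOW_STANDBY_REPLAY is (1 << 5), so
+            # an (illustrative) flags value of 0x20 or 0x21 has the bit set and
+            # would trigger the 'allow_standby_replay 0' call below.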
+ if mdsmap['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY: + self.mgr.log.info('Upgrade: Disabling standby-replay for filesystem %s' % ( + fs_name + )) + if fscid not in self.upgrade_state.fs_original_allow_standby_replay: + self.upgrade_state.fs_original_allow_standby_replay[fscid] = True + self._save_upgrade_state() + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'fs set', + 'fs_name': fs_name, + 'var': 'allow_standby_replay', + 'val': '0', + }) + continue_upgrade = False + continue + + # scale down this filesystem? + if mdsmap["max_mds"] > 1: + if self.upgrade_state.fail_fs: + if not (mdsmap['flags'] & CEPH_MDSMAP_NOT_JOINABLE) and \ + len(mdsmap['up']) > 0: + self.mgr.log.info(f'Upgrade: failing fs {fs_name} for ' + f'rapid multi-rank mds upgrade') + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'fs fail', + 'fs_name': fs_name + }) + if ret != 0: + continue_upgrade = False + continue + else: + self.mgr.log.info('Upgrade: Scaling down filesystem %s' % ( + fs_name + )) + if fscid not in self.upgrade_state.fs_original_max_mds: + self.upgrade_state.fs_original_max_mds[fscid] = \ + mdsmap['max_mds'] + self._save_upgrade_state() + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'fs set', + 'fs_name': fs_name, + 'var': 'max_mds', + 'val': '1', + }) + continue_upgrade = False + continue + + if not self.upgrade_state.fail_fs: + if not (mdsmap['in'] == [0] and len(mdsmap['up']) <= 1): + self.mgr.log.info( + 'Upgrade: Waiting for fs %s to scale down to reach 1 MDS' % ( + fs_name)) + time.sleep(10) + continue_upgrade = False + continue + + if len(mdsmap['up']) == 0: + self.mgr.log.warning( + "Upgrade: No mds is up; continuing upgrade procedure to poke things in the right direction") + # This can happen because the current version MDS have + # incompatible compatsets; the mons will not do any promotions. + # We must upgrade to continue. + elif len(mdsmap['up']) > 0: + mdss = list(mdsmap['info'].values()) + assert len(mdss) == 1 + lone_mds = mdss[0] + if lone_mds['state'] != 'up:active': + self.mgr.log.info('Upgrade: Waiting for mds.%s to be up:active (currently %s)' % ( + lone_mds['name'], + lone_mds['state'], + )) + time.sleep(10) + continue_upgrade = False + continue + else: + assert False + + return continue_upgrade + + def _enough_mons_for_ok_to_stop(self) -> bool: + # type () -> bool + ret, out, err = self.mgr.check_mon_command({ + 'prefix': 'quorum_status', + }) + try: + j = json.loads(out) + except Exception: + raise OrchestratorError('failed to parse quorum status') + + mons = [m['name'] for m in j['monmap']['mons']] + return len(mons) > 2 + + def _enough_mds_for_ok_to_stop(self, mds_daemon: DaemonDescription) -> bool: + # type (DaemonDescription) -> bool + + # find fs this mds daemon belongs to + fsmap = self.mgr.get("fs_map") + for fs in fsmap.get('filesystems', []): + mdsmap = fs["mdsmap"] + fs_name = mdsmap["fs_name"] + + assert mds_daemon.daemon_id + if fs_name != mds_daemon.service_name().split('.', 1)[1]: + # wrong fs for this mds daemon + continue + + # get number of mds daemons for this fs + mds_count = len( + [daemon for daemon in self.mgr.cache.get_daemons_by_service(mds_daemon.service_name())]) + + # standby mds daemons for this fs? 
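+            # e.g. (illustrative): max_mds == 1 with two mds daemons deployed
+            # for the service means a standby exists, so stopping one mds is
+            # acceptable.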
+ if mdsmap["max_mds"] < mds_count: + return True + return False + + return True # if mds has no fs it should pass ok-to-stop + + def _detect_need_upgrade(self, daemons: List[DaemonDescription], target_digests: Optional[List[str]] = None, target_name: Optional[str] = None) -> Tuple[bool, List[Tuple[DaemonDescription, bool]], List[Tuple[DaemonDescription, bool]], int]: + # this function takes a list of daemons and container digests. The purpose + # is to go through each daemon and check if the current container digests + # for that daemon match the target digests. The purpose being that we determine + # if a daemon is upgraded to a certain container image or not based on what + # container digests it has. By checking the current digests against the + # targets we can determine which daemons still need to be upgraded + need_upgrade_self = False + need_upgrade: List[Tuple[DaemonDescription, bool]] = [] + need_upgrade_deployer: List[Tuple[DaemonDescription, bool]] = [] + done = 0 + if target_digests is None: + target_digests = [] + if target_name is None: + target_name = '' + for d in daemons: + assert d.daemon_type is not None + assert d.daemon_id is not None + assert d.hostname is not None + if self.mgr.use_agent and not self.mgr.cache.host_metadata_up_to_date(d.hostname): + continue + correct_image = False + # check if the container digest for the digest we're upgrading to matches + # the container digest for the daemon if "use_repo_digest" setting is true + # or that the image name matches the daemon's image name if "use_repo_digest" + # is false. The idea is to generally check if the daemon is already using + # the image we're upgrading to or not. Additionally, since monitoring stack + # daemons are included in the upgrade process but don't use the ceph images + # we are assuming any monitoring stack daemon is on the "correct" image already + if ( + (self.mgr.use_repo_digest and d.matches_digests(target_digests)) + or (not self.mgr.use_repo_digest and d.matches_image_name(target_name)) + or (d.daemon_type in NON_CEPH_IMAGE_TYPES) + ): + logger.debug('daemon %s.%s on correct image' % ( + d.daemon_type, d.daemon_id)) + correct_image = True + # do deployed_by check using digest no matter what. 
We don't care + # what repo the image used to deploy the daemon was as long + # as the image content is correct + if any(d in target_digests for d in (d.deployed_by or [])): + logger.debug('daemon %s.%s deployed by correct version' % ( + d.daemon_type, d.daemon_id)) + done += 1 + continue + + if self.mgr.daemon_is_self(d.daemon_type, d.daemon_id): + logger.info('Upgrade: Need to upgrade myself (mgr.%s)' % + self.mgr.get_mgr_id()) + need_upgrade_self = True + continue + + if correct_image: + logger.debug('daemon %s.%s not deployed by correct version' % ( + d.daemon_type, d.daemon_id)) + need_upgrade_deployer.append((d, True)) + else: + logger.debug('daemon %s.%s not correct (%s, %s, %s)' % ( + d.daemon_type, d.daemon_id, + d.container_image_name, d.container_image_digests, d.version)) + need_upgrade.append((d, False)) + + return (need_upgrade_self, need_upgrade, need_upgrade_deployer, done) + + def _to_upgrade(self, need_upgrade: List[Tuple[DaemonDescription, bool]], target_image: str) -> Tuple[bool, List[Tuple[DaemonDescription, bool]]]: + to_upgrade: List[Tuple[DaemonDescription, bool]] = [] + known_ok_to_stop: List[str] = [] + for d_entry in need_upgrade: + d = d_entry[0] + assert d.daemon_type is not None + assert d.daemon_id is not None + assert d.hostname is not None + + if not d.container_image_id: + if d.container_image_name == target_image: + logger.debug( + 'daemon %s has unknown container_image_id but has correct image name' % (d.name())) + continue + + if known_ok_to_stop: + if d.name() in known_ok_to_stop: + logger.info(f'Upgrade: {d.name()} is also safe to restart') + to_upgrade.append(d_entry) + continue + + if d.daemon_type == 'osd': + # NOTE: known_ok_to_stop is an output argument for + # _wait_for_ok_to_stop + if not self._wait_for_ok_to_stop(d, known_ok_to_stop): + return False, to_upgrade + + if d.daemon_type == 'mon' and self._enough_mons_for_ok_to_stop(): + if not self._wait_for_ok_to_stop(d, known_ok_to_stop): + return False, to_upgrade + + if d.daemon_type == 'mds' and self._enough_mds_for_ok_to_stop(d): + # when fail_fs is set to true, all MDS daemons will be moved to + # up:standby state, so Cephadm won't be able to upgrade due to + # this check and and will warn with "It is NOT safe to stop + # mds. at this time: one or more filesystems is + # currently degraded", therefore we bypass this check for that + # case. + assert self.upgrade_state is not None + if not self.upgrade_state.fail_fs \ + and not self._wait_for_ok_to_stop(d, known_ok_to_stop): + return False, to_upgrade + + to_upgrade.append(d_entry) + + # if we don't have a list of others to consider, stop now + if d.daemon_type in ['osd', 'mds', 'mon'] and not known_ok_to_stop: + break + return True, to_upgrade + + def _upgrade_daemons(self, to_upgrade: List[Tuple[DaemonDescription, bool]], target_image: str, target_digests: Optional[List[str]] = None) -> None: + assert self.upgrade_state is not None + num = 1 + if target_digests is None: + target_digests = [] + for d_entry in to_upgrade: + if self.upgrade_state.remaining_count is not None and self.upgrade_state.remaining_count <= 0 and not d_entry[1]: + self.mgr.log.info( + f'Hit upgrade limit of {self.upgrade_state.total_count}. 
Stopping upgrade') + return + d = d_entry[0] + assert d.daemon_type is not None + assert d.daemon_id is not None + assert d.hostname is not None + + # make sure host has latest container image + with self.mgr.async_timeout_handler(d.hostname, 'cephadm inspect-image'): + out, errs, code = self.mgr.wait_async(CephadmServe(self.mgr)._run_cephadm( + d.hostname, '', 'inspect-image', [], + image=target_image, no_fsid=True, error_ok=True)) + if code or not any(d in target_digests for d in json.loads(''.join(out)).get('repo_digests', [])): + logger.info('Upgrade: Pulling %s on %s' % (target_image, + d.hostname)) + self.upgrade_info_str = 'Pulling %s image on host %s' % ( + target_image, d.hostname) + with self.mgr.async_timeout_handler(d.hostname, 'cephadm pull'): + out, errs, code = self.mgr.wait_async(CephadmServe(self.mgr)._run_cephadm( + d.hostname, '', 'pull', [], + image=target_image, no_fsid=True, error_ok=True)) + if code: + self._fail_upgrade('UPGRADE_FAILED_PULL', { + 'severity': 'warning', + 'summary': 'Upgrade: failed to pull target image', + 'count': 1, + 'detail': [ + 'failed to pull %s on host %s' % (target_image, + d.hostname)], + }) + return + r = json.loads(''.join(out)) + if not any(d in target_digests for d in r.get('repo_digests', [])): + logger.info('Upgrade: image %s pull on %s got new digests %s (not %s), restarting' % ( + target_image, d.hostname, r['repo_digests'], target_digests)) + self.upgrade_info_str = 'Image %s pull on %s got new digests %s (not %s), restarting' % ( + target_image, d.hostname, r['repo_digests'], target_digests) + self.upgrade_state.target_digests = r['repo_digests'] + self._save_upgrade_state() + return + + self.upgrade_info_str = 'Currently upgrading %s daemons' % (d.daemon_type) + + if len(to_upgrade) > 1: + logger.info('Upgrade: Updating %s.%s (%d/%d)' % (d.daemon_type, d.daemon_id, num, min(len(to_upgrade), + self.upgrade_state.remaining_count if self.upgrade_state.remaining_count is not None else 9999999))) + else: + logger.info('Upgrade: Updating %s.%s' % + (d.daemon_type, d.daemon_id)) + action = 'Upgrading' if not d_entry[1] else 'Redeploying' + try: + daemon_spec = CephadmDaemonDeploySpec.from_daemon_description(d) + self.mgr._daemon_action( + daemon_spec, + 'redeploy', + image=target_image if not d_entry[1] else None + ) + self.mgr.cache.metadata_up_to_date[d.hostname] = False + except Exception as e: + self._fail_upgrade('UPGRADE_REDEPLOY_DAEMON', { + 'severity': 'warning', + 'summary': f'{action} daemon {d.name()} on host {d.hostname} failed.', + 'count': 1, + 'detail': [ + f'Upgrade daemon: {d.name()}: {e}' + ], + }) + return + num += 1 + if self.upgrade_state.remaining_count is not None and not d_entry[1]: + self.upgrade_state.remaining_count -= 1 + self._save_upgrade_state() + + def _handle_need_upgrade_self(self, need_upgrade_self: bool, upgrading_mgrs: bool) -> None: + if need_upgrade_self: + try: + self.mgr.mgr_service.fail_over() + except OrchestratorError as e: + self._fail_upgrade('UPGRADE_NO_STANDBY_MGR', { + 'severity': 'warning', + 'summary': f'Upgrade: {e}', + 'count': 1, + 'detail': [ + 'The upgrade process needs to upgrade the mgr, ' + 'but it needs at least one standby to proceed.', + ], + }) + return + + return # unreachable code, as fail_over never returns + elif upgrading_mgrs: + if 'UPGRADE_NO_STANDBY_MGR' in self.mgr.health_checks: + del self.mgr.health_checks['UPGRADE_NO_STANDBY_MGR'] + self.mgr.set_health_checks(self.mgr.health_checks) + + def _set_container_images(self, daemon_type: str, target_image: str, 
image_settings: Dict[str, str]) -> None: + # push down configs + daemon_type_section = name_to_config_section(daemon_type) + if image_settings.get(daemon_type_section) != target_image: + logger.info('Upgrade: Setting container_image for all %s' % + daemon_type) + self.mgr.set_container_image(daemon_type_section, target_image) + to_clean = [] + for section in image_settings.keys(): + if section.startswith(name_to_config_section(daemon_type) + '.'): + to_clean.append(section) + if to_clean: + logger.debug('Upgrade: Cleaning up container_image for %s' % + to_clean) + for section in to_clean: + ret, image, err = self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'name': 'container_image', + 'who': section, + }) + + def _complete_osd_upgrade(self, target_major: str, target_major_name: str) -> None: + osdmap = self.mgr.get("osd_map") + osd_min_name = osdmap.get("require_osd_release", "argonaut") + osd_min = ceph_release_to_major(osd_min_name) + if osd_min < int(target_major): + logger.info( + f'Upgrade: Setting require_osd_release to {target_major} {target_major_name}') + ret, _, err = self.mgr.check_mon_command({ + 'prefix': 'osd require-osd-release', + 'release': target_major_name, + }) + + def _complete_mds_upgrade(self) -> None: + assert self.upgrade_state is not None + if self.upgrade_state.fail_fs: + for fs in self.mgr.get("fs_map")['filesystems']: + fs_name = fs['mdsmap']['fs_name'] + self.mgr.log.info('Upgrade: Setting filesystem ' + f'{fs_name} Joinable') + try: + ret, _, err = self.mgr.check_mon_command({ + 'prefix': 'fs set', + 'fs_name': fs_name, + 'var': 'joinable', + 'val': 'true', + }) + except Exception as e: + logger.error("Failed to set fs joinable " + f"true due to {e}") + raise OrchestratorError("Failed to set" + "fs joinable true" + f"due to {e}") + elif self.upgrade_state.fs_original_max_mds: + for fs in self.mgr.get("fs_map")['filesystems']: + fscid = fs["id"] + fs_name = fs['mdsmap']['fs_name'] + new_max = self.upgrade_state.fs_original_max_mds.get(fscid, 1) + if new_max > 1: + self.mgr.log.info('Upgrade: Scaling up filesystem %s max_mds to %d' % ( + fs_name, new_max + )) + ret, _, err = self.mgr.check_mon_command({ + 'prefix': 'fs set', + 'fs_name': fs_name, + 'var': 'max_mds', + 'val': str(new_max), + }) + + self.upgrade_state.fs_original_max_mds = {} + self._save_upgrade_state() + if self.upgrade_state.fs_original_allow_standby_replay: + for fs in self.mgr.get("fs_map")['filesystems']: + fscid = fs["id"] + fs_name = fs['mdsmap']['fs_name'] + asr = self.upgrade_state.fs_original_allow_standby_replay.get(fscid, False) + if asr: + self.mgr.log.info('Upgrade: Enabling allow_standby_replay on filesystem %s' % ( + fs_name + )) + ret, _, err = self.mgr.check_mon_command({ + 'prefix': 'fs set', + 'fs_name': fs_name, + 'var': 'allow_standby_replay', + 'val': '1' + }) + + self.upgrade_state.fs_original_allow_standby_replay = {} + self._save_upgrade_state() + + def _mark_upgrade_complete(self) -> None: + if not self.upgrade_state: + logger.debug('_mark_upgrade_complete upgrade already marked complete, exiting') + return + logger.info('Upgrade: Complete!') + if self.upgrade_state.progress_id: + self.mgr.remote('progress', 'complete', + self.upgrade_state.progress_id) + self.upgrade_state = None + self._save_upgrade_state() + + def _do_upgrade(self): + # type: () -> None + if not self.upgrade_state: + logger.debug('_do_upgrade no state, exiting') + return + + if self.mgr.offline_hosts: + # offline host(s), on top of potential connection errors when trying to upgrade a daemon + 
# or pull an image, can cause issues where daemons are never ok to stop. Since evaluating + # whether or not that risk is present for any given offline hosts is a difficult problem, + # it's best to just fail upgrade cleanly so user can address the offline host(s) + + # the HostConnectionError expects a hostname and addr, so let's just take + # one at random. It doesn't really matter which host we say we couldn't reach here. + hostname: str = list(self.mgr.offline_hosts)[0] + addr: str = self.mgr.inventory.get_addr(hostname) + raise HostConnectionError(f'Host(s) were marked offline: {self.mgr.offline_hosts}', hostname, addr) + + target_image = self.target_image + target_id = self.upgrade_state.target_id + target_digests = self.upgrade_state.target_digests + target_version = self.upgrade_state.target_version + + first = False + if not target_id or not target_version or not target_digests: + # need to learn the container hash + logger.info('Upgrade: First pull of %s' % target_image) + self.upgrade_info_str = 'Doing first pull of %s image' % (target_image) + try: + with self.mgr.async_timeout_handler(f'cephadm inspect-image (image {target_image})'): + target_id, target_version, target_digests = self.mgr.wait_async( + CephadmServe(self.mgr)._get_container_image_info(target_image)) + except OrchestratorError as e: + self._fail_upgrade('UPGRADE_FAILED_PULL', { + 'severity': 'warning', + 'summary': 'Upgrade: failed to pull target image', + 'count': 1, + 'detail': [str(e)], + }) + return + if not target_version: + self._fail_upgrade('UPGRADE_FAILED_PULL', { + 'severity': 'warning', + 'summary': 'Upgrade: failed to pull target image', + 'count': 1, + 'detail': ['unable to extract ceph version from container'], + }) + return + self.upgrade_state.target_id = target_id + # extract the version portion of 'ceph version {version} ({sha1})' + self.upgrade_state.target_version = target_version.split(' ')[2] + self.upgrade_state.target_digests = target_digests + self._save_upgrade_state() + target_image = self.target_image + first = True + + if target_digests is None: + target_digests = [] + if target_version.startswith('ceph version '): + # tolerate/fix upgrade state from older version + self.upgrade_state.target_version = target_version.split(' ')[2] + target_version = self.upgrade_state.target_version + (target_major, _) = target_version.split('.', 1) + target_major_name = self.mgr.lookup_release_name(int(target_major)) + + if first: + logger.info('Upgrade: Target is version %s (%s)' % ( + target_version, target_major_name)) + logger.info('Upgrade: Target container is %s, digests %s' % ( + target_image, target_digests)) + + version_error = self._check_target_version(target_version) + if version_error: + self._fail_upgrade('UPGRADE_BAD_TARGET_VERSION', { + 'severity': 'error', + 'summary': f'Upgrade: cannot upgrade/downgrade to {target_version}', + 'count': 1, + 'detail': [version_error], + }) + return + + image_settings = self.get_distinct_container_image_settings() + + # Older monitors (pre-v16.2.5) asserted that FSMap::compat == + # MDSMap::compat for all fs. This is no longer the case beginning in + # v16.2.5. We must disable the sanity checks during upgrade. + # N.B.: we don't bother confirming the operator has not already + # disabled this or saving the config value. 
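+        # Roughly equivalent to running the following CLI command (shown for
+        # orientation only; the mon command call below is what executes):
+        #   ceph config set mon mon_mds_skip_sanity 1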
+        self.mgr.check_mon_command({
+            'prefix': 'config set',
+            'name': 'mon_mds_skip_sanity',
+            'value': '1',
+            'who': 'mon',
+        })
+
+        if self.upgrade_state.daemon_types is not None:
+            logger.debug(
+                f'Filtering daemons to upgrade by daemon types: {self.upgrade_state.daemon_types}')
+            daemons = [d for d in self.mgr.cache.get_daemons(
+            ) if d.daemon_type in self.upgrade_state.daemon_types]
+        elif self.upgrade_state.services is not None:
+            logger.debug(
+                f'Filtering daemons to upgrade by services: {self.upgrade_state.services}')
+            daemons = []
+            for service in self.upgrade_state.services:
+                daemons += self.mgr.cache.get_daemons_by_service(service)
+        else:
+            daemons = [d for d in self.mgr.cache.get_daemons(
+            ) if d.daemon_type in CEPH_UPGRADE_ORDER]
+        if self.upgrade_state.hosts is not None:
+            logger.debug(f'Filtering daemons to upgrade by hosts: {self.upgrade_state.hosts}')
+            daemons = [d for d in daemons if d.hostname in self.upgrade_state.hosts]
+        upgraded_daemon_count: int = 0
+        for daemon_type in CEPH_UPGRADE_ORDER:
+            if self.upgrade_state.remaining_count is not None and self.upgrade_state.remaining_count <= 0:
+                # we hit our limit and should end the upgrade,
+                # except for cases where we only need to redeploy, but not actually upgrade
+                # the image (which we don't count towards our limit). This case only occurs with mgr
+                # and monitoring stack daemons. Additionally, this case is only valid if
+                # the active mgr is already upgraded.
+                if any(d in target_digests for d in self.mgr.get_active_mgr_digests()):
+                    if daemon_type not in NON_CEPH_IMAGE_TYPES and daemon_type != 'mgr':
+                        continue
+                else:
+                    self._mark_upgrade_complete()
+                    return
+            logger.debug('Upgrade: Checking %s daemons' % daemon_type)
+            daemons_of_type = [d for d in daemons if d.daemon_type == daemon_type]
+
+            need_upgrade_self, need_upgrade, need_upgrade_deployer, done = self._detect_need_upgrade(
+                daemons_of_type, target_digests, target_image)
+            upgraded_daemon_count += done
+            self._update_upgrade_progress(upgraded_daemon_count / len(daemons))
+
+            # make sure mgr and non-ceph-image daemons are properly redeployed in staggered upgrade scenarios
+            if daemon_type == 'mgr' or daemon_type in NON_CEPH_IMAGE_TYPES:
+                if any(d in target_digests for d in self.mgr.get_active_mgr_digests()):
+                    need_upgrade_names = [d[0].name() for d in need_upgrade] + \
+                        [d[0].name() for d in need_upgrade_deployer]
+                    dds = [d for d in self.mgr.cache.get_daemons_by_type(
+                        daemon_type) if d.name() not in need_upgrade_names]
+                    need_upgrade_active, n1, n2, __ = self._detect_need_upgrade(dds, target_digests, target_image)
+                    if not n1:
+                        if not need_upgrade_self and need_upgrade_active:
+                            need_upgrade_self = True
+                        need_upgrade_deployer += n2
+                else:
+                    # no point in trying to redeploy with new version if active mgr is not on the new version
+                    need_upgrade_deployer = []
+
+            if any(d in target_digests for d in self.mgr.get_active_mgr_digests()):
+                # only after the mgr itself is upgraded can we expect daemons to have
+                # deployed_by == target_digests
+                need_upgrade += need_upgrade_deployer
+
+            # prepare filesystems for daemon upgrades?
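+            # Sketch of the prepare step below: for an fs with max_mds == 2
+            # (example value), it is first scaled to a single active mds; the
+            # original max_mds is saved and restored once the mds upgrade
+            # completes.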
+ if ( + daemon_type == 'mds' + and need_upgrade + and not self._prepare_for_mds_upgrade(target_major, [d_entry[0] for d_entry in need_upgrade]) + ): + return + + if need_upgrade: + self.upgrade_info_str = 'Currently upgrading %s daemons' % (daemon_type) + + _continue, to_upgrade = self._to_upgrade(need_upgrade, target_image) + if not _continue: + return + self._upgrade_daemons(to_upgrade, target_image, target_digests) + if to_upgrade: + return + + self._handle_need_upgrade_self(need_upgrade_self, daemon_type == 'mgr') + + # following bits of _do_upgrade are for completing upgrade for given + # types. If we haven't actually finished upgrading all the daemons + # of this type, we should exit the loop here + _, n1, n2, _ = self._detect_need_upgrade( + self.mgr.cache.get_daemons_by_type(daemon_type), target_digests, target_image) + if n1 or n2: + continue + + # complete mon upgrade? + if daemon_type == 'mon': + if not self.mgr.get("have_local_config_map"): + logger.info('Upgrade: Restarting mgr now that mons are running pacific') + need_upgrade_self = True + + self._handle_need_upgrade_self(need_upgrade_self, daemon_type == 'mgr') + + # make sure 'ceph versions' agrees + ret, out_ver, err = self.mgr.check_mon_command({ + 'prefix': 'versions', + }) + j = json.loads(out_ver) + for version, count in j.get(daemon_type, {}).items(): + short_version = version.split(' ')[2] + if short_version != target_version: + logger.warning( + 'Upgrade: %d %s daemon(s) are %s != target %s' % + (count, daemon_type, short_version, target_version)) + + self._set_container_images(daemon_type, target_image, image_settings) + + # complete osd upgrade? + if daemon_type == 'osd': + self._complete_osd_upgrade(target_major, target_major_name) + + # complete mds upgrade? + if daemon_type == 'mds': + self._complete_mds_upgrade() + + # Make sure all metadata is up to date before saying we are done upgrading this daemon type + if self.mgr.use_agent and not self.mgr.cache.all_host_metadata_up_to_date(): + self.mgr.agent_helpers._request_ack_all_not_up_to_date() + return + + logger.debug('Upgrade: Upgraded %s daemon(s).' % daemon_type) + + # clean up + logger.info('Upgrade: Finalizing container_image settings') + self.mgr.set_container_image('global', target_image) + + for daemon_type in CEPH_UPGRADE_ORDER: + ret, image, err = self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'name': 'container_image', + 'who': name_to_config_section(daemon_type), + }) + + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'name': 'mon_mds_skip_sanity', + 'who': 'mon', + }) + + self._mark_upgrade_complete() + return diff --git a/src/pybind/mgr/cephadm/utils.py b/src/pybind/mgr/cephadm/utils.py new file mode 100644 index 000000000..63672936c --- /dev/null +++ b/src/pybind/mgr/cephadm/utils.py @@ -0,0 +1,153 @@ +import logging +import json +import socket +from enum import Enum +from functools import wraps +from typing import Optional, Callable, TypeVar, List, NewType, TYPE_CHECKING, Any, NamedTuple +from orchestrator import OrchestratorError + +if TYPE_CHECKING: + from cephadm import CephadmOrchestrator + +T = TypeVar('T') +logger = logging.getLogger(__name__) + +ConfEntity = NewType('ConfEntity', str) + + +class CephadmNoImage(Enum): + token = 1 + + +# ceph daemon types that use the ceph container image. 
+# NOTE: order important here as these are used for upgrade order
+CEPH_TYPES = ['mgr', 'mon', 'crash', 'osd', 'mds', 'rgw',
+              'rbd-mirror', 'cephfs-mirror', 'ceph-exporter']
+GATEWAY_TYPES = ['iscsi', 'nfs', 'nvmeof']
+MONITORING_STACK_TYPES = ['node-exporter', 'prometheus',
+                          'alertmanager', 'grafana', 'loki', 'promtail']
+RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES = ['haproxy', 'nfs']
+
+CEPH_UPGRADE_ORDER = CEPH_TYPES + GATEWAY_TYPES + MONITORING_STACK_TYPES
+
+# these daemon types use the ceph container image
+CEPH_IMAGE_TYPES = CEPH_TYPES + ['iscsi', 'nfs']
+
+# these daemons do not use the ceph image. There are other daemons
+# that also don't use the ceph image, but we only care about those
+# that are part of the upgrade order here
+NON_CEPH_IMAGE_TYPES = MONITORING_STACK_TYPES + ['nvmeof']
+
+# Used by _run_cephadm for operations like check-host that don't require an --image parameter
+cephadmNoImage = CephadmNoImage.token
+
+
+class ContainerInspectInfo(NamedTuple):
+    image_id: str
+    ceph_version: Optional[str]
+    repo_digests: Optional[List[str]]
+
+
+class SpecialHostLabels(str, Enum):
+    ADMIN: str = '_admin'
+    NO_MEMORY_AUTOTUNE: str = '_no_autotune_memory'
+    DRAIN_DAEMONS: str = '_no_schedule'
+    DRAIN_CONF_KEYRING: str = '_no_conf_keyring'
+
+    def to_json(self) -> str:
+        return self.value
+
+
+def name_to_config_section(name: str) -> ConfEntity:
+    """
+    Map from daemon names to ceph entity names (as seen in config)
+    """
+    daemon_type = name.split('.', 1)[0]
+    if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi', 'ceph-exporter', 'nvmeof']:
+        return ConfEntity('client.' + name)
+    elif daemon_type in ['mon', 'osd', 'mds', 'mgr', 'client']:
+        return ConfEntity(name)
+    else:
+        return ConfEntity('mon')
+
+
+def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
+    @wraps(f)
+    def forall_hosts_wrapper(*args: Any) -> List[T]:
+        from cephadm.module import CephadmOrchestrator
+
+        # Some weird logic to make calling functions with multiple arguments work.
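+        # The two supported call shapes (sketch, names illustrative):
+        #   wrapped([(host, arg), ...])        -> len(args) == 1 (plain function)
+        #   self.wrapped([(host, arg), ...])   -> len(args) == 2 (bound method)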
+        if len(args) == 1:
+            vals = args[0]
+            self = None
+        elif len(args) == 2:
+            self, vals = args
+        else:
+            assert False, 'either f([...]) or self.f([...])'
+
+        def do_work(arg: Any) -> T:
+            if not isinstance(arg, tuple):
+                arg = (arg, )
+            try:
+                if self:
+                    return f(self, *arg)
+                return f(*arg)
+            except Exception:
+                logger.exception(f'executing {f.__name__}({args}) failed.')
+                raise
+
+        assert CephadmOrchestrator.instance is not None
+        return CephadmOrchestrator.instance._worker_pool.map(do_work, vals)
+
+    return forall_hosts_wrapper
+
+
+def get_cluster_health(mgr: 'CephadmOrchestrator') -> str:
+    # check cluster health
+    ret, out, err = mgr.check_mon_command({
+        'prefix': 'health',
+        'format': 'json',
+    })
+    try:
+        j = json.loads(out)
+    except ValueError:
+        msg = 'Failed to parse health status: Cannot decode JSON'
+        logger.exception('%s: \'%s\'' % (msg, out))
+        raise OrchestratorError('failed to parse health status')
+
+    return j['status']
+
+
+def is_repo_digest(image_name: str) -> bool:
+    """
+    repo digests are something like "ceph/ceph@sha256:blablabla"
+    """
+    return '@' in image_name
+
+
+def resolve_ip(hostname: str) -> str:
+    try:
+        r = socket.getaddrinfo(hostname, None, flags=socket.AI_CANONNAME,
+                               type=socket.SOCK_STREAM)
+        # pick first v4 IP, if present
+        for a in r:
+            if a[0] == socket.AF_INET:
+                return a[4][0]
+        return r[0][4][0]
+    except socket.gaierror as e:
+        raise OrchestratorError(f"Cannot resolve ip for host {hostname}: {e}")
+
+
+def ceph_release_to_major(release: str) -> int:
+    return ord(release[0]) - ord('a') + 1
+
+
+def file_mode_to_str(mode: int) -> str:
+    r = ''
+    for shift in range(0, 9, 3):
+        r = (
+            f'{"r" if (mode >> shift) & 4 else "-"}'
+            f'{"w" if (mode >> shift) & 2 else "-"}'
+            f'{"x" if (mode >> shift) & 1 else "-"}'
+        ) + r
+    return r
diff --git a/src/pybind/mgr/cephadm/vagrant.config.example.json b/src/pybind/mgr/cephadm/vagrant.config.example.json
new file mode 100644
index 000000000..9419af630
--- /dev/null
+++ b/src/pybind/mgr/cephadm/vagrant.config.example.json
@@ -0,0 +1,13 @@
+/**
+ * To use a permanent config, copy this file to "vagrant.config.json",
+ * edit it, and remove this comment because comments are not allowed
+ * in a valid JSON file. 
+ */ + +{ + "mgrs": 1, + "mons": 1, + "osds": 1, + "disks": 2 +} + diff --git a/src/pybind/mgr/cli_api/__init__.py b/src/pybind/mgr/cli_api/__init__.py new file mode 100644 index 000000000..a52284054 --- /dev/null +++ b/src/pybind/mgr/cli_api/__init__.py @@ -0,0 +1,10 @@ +from .module import CLI + +__all__ = [ + "CLI", +] + +import os +if 'UNITTEST' in os.environ: + import tests # noqa # pylint: disable=unused-import + __all__.append(tests.__name__) diff --git a/src/pybind/mgr/cli_api/module.py b/src/pybind/mgr/cli_api/module.py new file mode 100755 index 000000000..79b042eb0 --- /dev/null +++ b/src/pybind/mgr/cli_api/module.py @@ -0,0 +1,120 @@ +import concurrent.futures +import functools +import inspect +import logging +import time +import errno +from typing import Any, Callable, Dict, List + +from mgr_module import MgrModule, HandleCommandResult, CLICommand, API + +logger = logging.getLogger() +get_time = time.perf_counter + + +def pretty_json(obj: Any) -> Any: + import json + return json.dumps(obj, sort_keys=True, indent=2) + + +class CephCommander: + """ + Utility class to inspect Python functions and generate corresponding + CephCommand signatures (see src/mon/MonCommand.h for details) + """ + + def __init__(self, func: Callable): + self.func = func + self.signature = inspect.signature(func) + self.params = self.signature.parameters + + def to_ceph_signature(self) -> Dict[str, str]: + """ + Generate CephCommand signature (dict-like) + """ + return { + 'prefix': f'mgr cli {self.func.__name__}', + 'perm': API.perm.get(self.func) + } + + +class MgrAPIReflector(type): + """ + Metaclass to register COMMANDS and Command Handlers via CLICommand + decorator + """ + + def __new__(cls, name, bases, dct): # type: ignore + klass = super().__new__(cls, name, bases, dct) + cls.threaded_benchmark_runner = None + for base in bases: + for name, func in inspect.getmembers(base, cls.is_public): + # However not necessary (CLICommand uses a registry) + # save functions to klass._cli_{n}() methods. 
This + # can help on unit testing + wrapper = cls.func_wrapper(func) + command = CLICommand(**CephCommander(func).to_ceph_signature())( # type: ignore + wrapper) + setattr( + klass, + f'_cli_{name}', + command) + return klass + + @staticmethod + def is_public(func: Callable) -> bool: + return ( + inspect.isfunction(func) + and not func.__name__.startswith('_') + and API.expose.get(func) + ) + + @staticmethod + def func_wrapper(func: Callable) -> Callable: + @functools.wraps(func) + def wrapper(self, *args, **kwargs) -> HandleCommandResult: # type: ignore + return HandleCommandResult(stdout=pretty_json( + func(self, *args, **kwargs))) + + # functools doesn't change the signature when wrapping a function + # so we do it manually + signature = inspect.signature(func) + wrapper.__signature__ = signature # type: ignore + return wrapper + + +class CLI(MgrModule, metaclass=MgrAPIReflector): + @CLICommand('mgr cli_benchmark') + def benchmark(self, iterations: int, threads: int, func_name: str, + func_args: List[str] = None) -> HandleCommandResult: # type: ignore + func_args = () if func_args is None else func_args + if iterations and threads: + try: + func = getattr(self, func_name) + except AttributeError: + return HandleCommandResult(errno.EINVAL, + stderr="Could not find the public " + "function you are requesting") + else: + raise BenchmarkException("Number of calls and number " + "of parallel calls must be greater than 0") + + def timer(*args: Any) -> float: + time_start = get_time() + func(*func_args) + return get_time() - time_start + + with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: + results_iter = executor.map(timer, range(iterations)) + results = list(results_iter) + + stats = { + "avg": sum(results) / len(results), + "max": max(results), + "min": min(results), + } + return HandleCommandResult(stdout=pretty_json(stats)) + + +class BenchmarkException(Exception): + pass diff --git a/src/pybind/mgr/cli_api/tests/__init__.py b/src/pybind/mgr/cli_api/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/pybind/mgr/cli_api/tests/test_cli_api.py b/src/pybind/mgr/cli_api/tests/test_cli_api.py new file mode 100644 index 000000000..ee42dc96a --- /dev/null +++ b/src/pybind/mgr/cli_api/tests/test_cli_api.py @@ -0,0 +1,40 @@ +import unittest + +from ..module import CLI, BenchmarkException, HandleCommandResult + + +class BenchmarkRunnerTest(unittest.TestCase): + def setUp(self): + self.cli = CLI('CLI', 0, 0) + + def test_number_of_calls_on_start_fails(self): + with self.assertRaises(BenchmarkException) as ctx: + self.cli.benchmark(0, 10, 'list_servers', []) + self.assertEqual(str(ctx.exception), + "Number of calls and number " + "of parallel calls must be greater than 0") + + def test_number_of_parallel_calls_on_start_fails(self): + with self.assertRaises(BenchmarkException) as ctx: + self.cli.benchmark(100, 0, 'list_servers', []) + self.assertEqual(str(ctx.exception), + "Number of calls and number " + "of parallel calls must be greater than 0") + + def test_number_of_parallel_calls_on_start_works(self): + CLI.benchmark(10, 10, "get", "osd_map") + + def test_function_name_fails(self): + for iterations in [0, 1]: + threads = 0 if iterations else 1 + with self.assertRaises(BenchmarkException) as ctx: + self.cli.benchmark(iterations, threads, 'fake_method', []) + self.assertEqual(str(ctx.exception), + "Number of calls and number " + "of parallel calls must be greater than 0") + result: HandleCommandResult = self.cli.benchmark(1, 1, 'fake_method', 
[]) + self.assertEqual(result.stderr, "Could not find the public " + "function you are requesting") + + def test_function_name_works(self): + CLI.benchmark(10, 10, "get", "osd_map") diff --git a/src/pybind/mgr/crash/__init__.py b/src/pybind/mgr/crash/__init__.py new file mode 100644 index 000000000..ee85dc9d3 --- /dev/null +++ b/src/pybind/mgr/crash/__init__.py @@ -0,0 +1,2 @@ +# flake8: noqa +from .module import Module diff --git a/src/pybind/mgr/crash/module.py b/src/pybind/mgr/crash/module.py new file mode 100644 index 000000000..e9f78c815 --- /dev/null +++ b/src/pybind/mgr/crash/module.py @@ -0,0 +1,447 @@ +import hashlib +from mgr_module import CLICommand, CLIReadCommand, CLIWriteCommand, MgrModule, Option +import datetime +import errno +import functools +import inspect +import json +from collections import defaultdict +from prettytable import PrettyTable +import re +from threading import Event, Lock +from typing import cast, Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Tuple, TypeVar, \ + Union, TYPE_CHECKING + + +DATEFMT = '%Y-%m-%dT%H:%M:%S.%f' +OLD_DATEFMT = '%Y-%m-%d %H:%M:%S.%f' + +MAX_WAIT = 600 +MIN_WAIT = 60 + + +FuncT = TypeVar('FuncT', bound=Callable) + + +def with_crashes(func: FuncT) -> FuncT: + @functools.wraps(func) + def wrapper(self: 'Module', *args: Any, **kwargs: Any) -> Tuple[int, str, str]: + with self.crashes_lock: + if not self.crashes: + self._load_crashes() + return func(self, *args, **kwargs) + wrapper.__signature__ = inspect.signature(func) # type: ignore[attr-defined] + return cast(FuncT, wrapper) + + +CrashT = Dict[str, Union[str, List[str]]] + + +class Module(MgrModule): + MODULE_OPTIONS = [ + Option( + name='warn_recent_interval', + type='secs', + default=60 * 60 * 24 * 14, + desc='time interval in which to warn about recent crashes', + runtime=True), + Option( + name='retain_interval', + type='secs', + default=60 * 60 * 24 * 365, + desc='how long to retain crashes before pruning them', + runtime=True), + ] + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super(Module, self).__init__(*args, **kwargs) + self.crashes: Optional[Dict[str, CrashT]] = None + self.crashes_lock = Lock() + self.run = True + self.event = Event() + if TYPE_CHECKING: + self.warn_recent_interval = 0.0 + self.retain_interval = 0.0 + + def shutdown(self) -> None: + self.run = False + self.event.set() + + def serve(self) -> None: + self.config_notify() + while self.run: + with self.crashes_lock: + self._refresh_health_checks() + self._prune(self.retain_interval) + wait = min(MAX_WAIT, max(self.warn_recent_interval / 100, MIN_WAIT)) + self.event.wait(wait) + self.event.clear() + + def config_notify(self) -> None: + for opt in self.MODULE_OPTIONS: + setattr(self, + opt['name'], + self.get_module_option(opt['name'])) + self.log.debug(' mgr option %s = %s', + opt['name'], getattr(self, opt['name'])) + + def _load_crashes(self) -> None: + raw = self.get_store_prefix('crash/') + self.crashes = {k[6:]: json.loads(m) for (k, m) in raw.items()} + + def _refresh_health_checks(self) -> None: + if not self.crashes: + self._load_crashes() + assert self.crashes is not None + cutoff = datetime.datetime.utcnow() - datetime.timedelta( + seconds=self.warn_recent_interval) + recent = { + crashid: crash for crashid, crash in self.crashes.items() + if (self.time_from_string(cast(str, crash['timestamp'])) > cutoff + and 'archived' not in crash) + } + + def prune_detail(ls: List[str]) -> int: + num = len(ls) + if num > 30: + ls = ls[0:30] + ls.append('and %d more' % (num - 30)) + 
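+            # note: 'ls = ls[0:30]' above rebinds the local name only; the
+            # caller's detail list is never actually truncated, so the helper
+            # effectively just returns the count.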
return num + + daemon_crashes = [] + module_crashes = [] + for c in recent.values(): + if 'mgr_module' in c: + module_crashes.append(c) + else: + daemon_crashes.append(c) + daemon_detail = [ + '%s crashed on host %s at %s' % ( + crash.get('entity_name', 'unidentified daemon'), + crash.get('utsname_hostname', '(unknown)'), + crash.get('timestamp', 'unknown time')) + for crash in daemon_crashes] + module_detail = [ + 'mgr module %s crashed in daemon %s on host %s at %s' % ( + crash.get('mgr_module', 'unidentified module'), + crash.get('entity_name', 'unidentified daemon'), + crash.get('utsname_hostname', '(unknown)'), + crash.get('timestamp', 'unknown time')) + for crash in module_crashes] + daemon_num = prune_detail(daemon_detail) + module_num = prune_detail(module_detail) + + health_checks: Dict[str, Dict[str, Union[int, str, List[str]]]] = {} + if daemon_detail: + self.log.debug('daemon detail %s' % daemon_detail) + health_checks['RECENT_CRASH'] = { + 'severity': 'warning', + 'summary': '%d daemons have recently crashed' % (daemon_num), + 'count': daemon_num, + 'detail': daemon_detail, + } + if module_detail: + self.log.debug('module detail %s' % module_detail) + health_checks['RECENT_MGR_MODULE_CRASH'] = { + 'severity': 'warning', + 'summary': '%d mgr modules have recently crashed' % (module_num), + 'count': module_num, + 'detail': module_detail, + } + + self.set_health_checks(health_checks) + + def time_from_string(self, timestr: str) -> datetime.datetime: + # drop the 'Z' timezone indication, it's always UTC + timestr = timestr.rstrip('Z') + try: + return datetime.datetime.strptime(timestr, DATEFMT) + except ValueError: + return datetime.datetime.strptime(timestr, OLD_DATEFMT) + + def validate_crash_metadata(self, inbuf: str) -> Dict[str, Union[str, List[str]]]: + # raise any exceptions to caller + metadata = json.loads(inbuf) + for f in ['crash_id', 'timestamp']: + if f not in metadata: + raise AttributeError("missing '%s' field" % f) + _ = self.time_from_string(metadata['timestamp']) + return metadata + + def timestamp_filter(self, f: Callable[[datetime.datetime], bool]) -> Iterable[Tuple[str, CrashT]]: + """ + Filter crash reports by timestamp. + + :param f: f(time) return true to keep crash report + :returns: crash reports for which f(time) returns true + """ + def inner(pair: Tuple[str, CrashT]) -> bool: + _, crash = pair + time = self.time_from_string(cast(str, crash["timestamp"])) + return f(time) + assert self.crashes is not None + return filter(inner, self.crashes.items()) + + # stack signature helpers + + def sanitize_backtrace(self, bt: List[str]) -> List[str]: + ret = list() + for func_record in bt: + # split into two fields on last space, take the first one, + # strip off leading ( and trailing ) + func_plus_offset = func_record.rsplit(' ', 1)[0][1:-1] + ret.append(func_plus_offset.split('+')[0]) + + return ret + + ASSERT_MATCHEXPR = re.compile(r'(?s)(.*) thread .* time .*(: .*)\n') + + def sanitize_assert_msg(self, msg: str) -> str: + # (?s) allows matching newline. get everything up to "thread" and + # then after-and-including the last colon-space. This skips the + # thread id, timestamp, and file:lineno, because file is already in + # the beginning, and lineno may vary. 
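+        # Illustrative example (not from the original source): an input such as
+        #   "PG.cc: In function 'f' thread 7f02 time 2023-01-01T00:00:00.000000\n
+        #    PG.cc: 123: FAILED ceph_assert(0)\n"
+        # is reduced to "PG.cc: In function 'f': FAILED ceph_assert(0)".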
+        matched = self.ASSERT_MATCHEXPR.match(msg)
+        assert matched
+        return ''.join(matched.groups())
+
+    def calc_sig(self, bt: List[str], assert_msg: Optional[str]) -> str:
+        sig = hashlib.sha256()
+        for func in self.sanitize_backtrace(bt):
+            sig.update(func.encode())
+        if assert_msg:
+            sig.update(self.sanitize_assert_msg(assert_msg).encode())
+        return ''.join('%02x' % c for c in sig.digest())
+
+    # command handlers
+
+    @CLIReadCommand('crash info')
+    @with_crashes
+    def do_info(self, id: str) -> Tuple[int, str, str]:
+        """
+        show crash dump metadata
+        """
+        crashid = id
+        assert self.crashes is not None
+        crash = self.crashes.get(crashid)
+        if not crash:
+            return errno.EINVAL, '', 'crash info: %s not found' % crashid
+        val = json.dumps(crash, indent=4, sort_keys=True)
+        return 0, val, ''
+
+    @CLICommand('crash post')
+    def do_post(self, inbuf: str) -> Tuple[int, str, str]:
+        """
+        Add a crash dump (use -i <jsonfile>)
+        """
+        try:
+            metadata = self.validate_crash_metadata(inbuf)
+        except Exception as e:
+            return errno.EINVAL, '', 'malformed crash metadata: %s' % e
+        if 'backtrace' in metadata:
+            backtrace = cast(List[str], metadata.get('backtrace'))
+            assert_msg = cast(Optional[str], metadata.get('assert_msg'))
+            metadata['stack_sig'] = self.calc_sig(backtrace, assert_msg)
+        crashid = cast(str, metadata['crash_id'])
+        assert self.crashes is not None
+        if crashid not in self.crashes:
+            self.crashes[crashid] = metadata
+            key = 'crash/%s' % crashid
+            self.set_store(key, json.dumps(metadata))
+            self._refresh_health_checks()
+        return 0, '', ''
+
+    def ls(self) -> Tuple[int, str, str]:
+        if not self.crashes:
+            self._load_crashes()
+        return self.do_ls_all('')
+
+    def _do_ls(self, t: Iterable[CrashT], format: Optional[str]) -> Tuple[int, str, str]:
+        r = sorted(t, key=lambda i: i['crash_id'])
+        if format in ('json', 'json-pretty'):
+            return 0, json.dumps(r, indent=4, sort_keys=True), ''
+        else:
+            table = PrettyTable(['ID', 'ENTITY', 'NEW'],
+                                border=False)
+            table.left_padding_width = 0
+            table.right_padding_width = 2
+            table.align['ID'] = 'l'
+            table.align['ENTITY'] = 'l'
+            for c in r:
+                table.add_row([c.get('crash_id'),
+                               c.get('entity_name', 'unknown'),
+                               '' if 'archived' in c else '*'])
+            return 0, table.get_string(), ''
+
+    @CLIReadCommand('crash ls')
+    @with_crashes
+    def do_ls_all(self, format: Optional[str] = None) -> Tuple[int, str, str]:
+        """
+        Show new and archived crash dumps
+        """
+        assert self.crashes is not None
+        return self._do_ls(self.crashes.values(), format)
+
+    @CLIReadCommand('crash ls-new')
+    @with_crashes
+    def do_ls_new(self, format: Optional[str] = None) -> Tuple[int, str, str]:
+        """
+        Show new crash dumps
+        """
+        assert self.crashes is not None
+        t = [crash for crashid, crash in self.crashes.items()
+             if 'archived' not in crash]
+        return self._do_ls(t, format)
+
+    @CLICommand('crash rm')
+    @with_crashes
+    def do_rm(self, id: str) -> Tuple[int, str, str]:
+        """
+        Remove a saved crash <id>
+        """
+        crashid = id
+        assert self.crashes is not None
+        if crashid in self.crashes:
+            del self.crashes[crashid]
+            key = 'crash/%s' % crashid
+            self.set_store(key, None)  # removes key
+            self._refresh_health_checks()
+        return 0, '', ''
+
+    @CLICommand('crash prune')
+    @with_crashes
+    def do_prune(self, keep: int) -> Tuple[int, str, str]:
+        """
+        Remove crashes older than <keep> days
+        """
+        self._prune(keep * datetime.timedelta(days=1).total_seconds())
+        return 0, '', ''
+
+    def _prune(self, seconds: float) -> None:
+        now = datetime.datetime.utcnow()
+        cutoff = now - datetime.timedelta(seconds=seconds)
+
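+        # _prune() is reached from two places: serve() passes retain_interval,
+        # and the 'crash prune' command passes <keep> days converted to seconds.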
+        removed_any = False
+        # make a copy of the list, since we'll modify self.crashes below
+        to_prune = list(self.timestamp_filter(lambda ts: ts <= cutoff))
+        assert self.crashes is not None
+        for crashid, crash in to_prune:
+            del self.crashes[crashid]
+            key = 'crash/%s' % crashid
+            self.set_store(key, None)
+            removed_any = True
+        if removed_any:
+            self._refresh_health_checks()
+
+    @CLIWriteCommand('crash archive')
+    @with_crashes
+    def do_archive(self, id: str) -> Tuple[int, str, str]:
+        """
+        Acknowledge a crash and silence health warning(s)
+        """
+        crashid = id
+        assert self.crashes is not None
+        crash = self.crashes.get(crashid)
+        if not crash:
+            return errno.EINVAL, '', 'crash info: %s not found' % crashid
+        if not crash.get('archived'):
+            crash['archived'] = str(datetime.datetime.utcnow())
+            self.crashes[crashid] = crash
+            key = 'crash/%s' % crashid
+            self.set_store(key, json.dumps(crash))
+            self._refresh_health_checks()
+        return 0, '', ''
+
+    @CLIWriteCommand('crash archive-all')
+    @with_crashes
+    def do_archive_all(self) -> Tuple[int, str, str]:
+        """
+        Acknowledge all new crashes and silence health warning(s)
+        """
+        assert self.crashes is not None
+        for crashid, crash in self.crashes.items():
+            if not crash.get('archived'):
+                crash['archived'] = str(datetime.datetime.utcnow())
+                self.crashes[crashid] = crash
+                key = 'crash/%s' % crashid
+                self.set_store(key, json.dumps(crash))
+        self._refresh_health_checks()
+        return 0, '', ''
+
+    @CLIReadCommand('crash stat')
+    @with_crashes
+    def do_stat(self) -> Tuple[int, str, str]:
+        """
+        Summarize recorded crashes
+        """
+        # age in days for reporting, ordered smallest first
+        AGE_IN_DAYS = [1, 3, 7]
+        retlines = list()
+
+        BinnedStatsT = Dict[str, Union[int, datetime.datetime, List[str]]]
+
+        def binstr(bindict: BinnedStatsT) -> str:
+            binlines = list()
+            id_list = cast(List[str], bindict['idlist'])
+            count = len(id_list)
+            if count:
+                binlines.append(
+                    '%d older than %s days old:' % (count, bindict['age'])
+                )
+                for crashid in id_list:
+                    binlines.append(crashid)
+            return '\n'.join(binlines)
+
+        total = 0
+        now = datetime.datetime.utcnow()
+        bins: List[BinnedStatsT] = []
+        for age in AGE_IN_DAYS:
+            agelimit = now - datetime.timedelta(days=age)
+            bins.append({
+                'age': age,
+                'agelimit': agelimit,
+                'idlist': list()
+            })
+
+        assert self.crashes is not None
+        for crashid, crash in self.crashes.items():
+            total += 1
+            stamp = self.time_from_string(cast(str, crash['timestamp']))
+            for bindict in bins:
+                if stamp <= cast(datetime.datetime, bindict['agelimit']):
+                    cast(List[str], bindict['idlist']).append(crashid)
+                    # don't count this one again
+                    continue
+
+        retlines.append('%d crashes recorded' % total)
+
+        for bindict in bins:
+            retlines.append(binstr(bindict))
+        return 0, '\n'.join(retlines), ''
+
+    @CLIReadCommand('crash json_report')
+    @with_crashes
+    def do_json_report(self, hours: int) -> Tuple[int, str, str]:
+        """
+        Crashes in the last <hours> hours
+        """
+        # Return a machine readable summary of recent crashes.
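+        # Illustrative output shape (not from the original source):
+        #   {"ceph-mgr": 1, "ceph-osd": 2}  -- crash counts keyed by process_name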
+ report: DefaultDict[str, int] = defaultdict(lambda: 0) + assert self.crashes is not None + for crashid, crash in self.crashes.items(): + pname = cast(str, crash.get("process_name", "unknown")) + if not pname: + pname = "unknown" + report[pname] += 1 + + return 0, '', json.dumps(report, sort_keys=True) + + def self_test(self) -> None: + # test time conversion + timestr = '2018-06-22T20:35:38.058818Z' + old_timestr = '2018-06-22 20:35:38.058818Z' + dt = self.time_from_string(timestr) + if dt != datetime.datetime(2018, 6, 22, 20, 35, 38, 58818): + raise RuntimeError('time_from_string() failed') + dt = self.time_from_string(old_timestr) + if dt != datetime.datetime(2018, 6, 22, 20, 35, 38, 58818): + raise RuntimeError('time_from_string() (old) failed') diff --git a/src/pybind/mgr/dashboard/.coveragerc b/src/pybind/mgr/dashboard/.coveragerc new file mode 100644 index 000000000..29a63192c --- /dev/null +++ b/src/pybind/mgr/dashboard/.coveragerc @@ -0,0 +1,7 @@ +[run] +omit = tests/* + */python*/* + ceph_module_mock.py + __init__.py + */mgr_module.py + diff --git a/src/pybind/mgr/dashboard/.editorconfig b/src/pybind/mgr/dashboard/.editorconfig new file mode 100644 index 000000000..a831e3da1 --- /dev/null +++ b/src/pybind/mgr/dashboard/.editorconfig @@ -0,0 +1,29 @@ +# EditorConfig helps developers define and maintain consistent coding styles +# between different editors and IDEs.: http://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true + +# Set default charset +[*.{js,py}] +charset = utf-8 + +# 4 space indentation for Python files +[*.py] +indent_style = space +indent_size = 4 + +# Indentation override for all JS under frontend directory +[frontend/**.js] +indent_style = space +indent_size = 2 + +# Indentation override for all HTML under frontend directory +[frontend/**.html] +indent_style = space +indent_size = 2 diff --git a/src/pybind/mgr/dashboard/.gitignore b/src/pybind/mgr/dashboard/.gitignore new file mode 100644 index 000000000..d457a7db3 --- /dev/null +++ b/src/pybind/mgr/dashboard/.gitignore @@ -0,0 +1,15 @@ +.coverage* +htmlcov +.tox +coverage.xml +junit*xml +.cache +ceph.conf + +# IDE +.vscode +*.egg +.env + +# virtualenv +venv diff --git a/src/pybind/mgr/dashboard/.pylintrc b/src/pybind/mgr/dashboard/.pylintrc new file mode 100644 index 000000000..79dfbad7d --- /dev/null +++ b/src/pybind/mgr/dashboard/.pylintrc @@ -0,0 +1,541 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +# TODO: remove racially insensitive terms when this becomes fixed: https://github.com/PyCQA/pylint/issues/3669 +extension-pkg-whitelist=rados,rbd,math,cephfs + +# Add files or directories to the blocklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blocklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +init-hook='import sys; sys.path.append("./")' + +# Use multiple processes to speed up Pylint. +jobs=1 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. 
+#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=import-star-module-level, + raw-checker-failed, + bad-inline-option, + locally-disabled, + locally-enabled, + suppressed-message, + useless-suppression, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + indexing-exception, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, + exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating, + missing-docstring, + invalid-name, + no-self-use, + too-few-public-methods, + no-member, + too-many-arguments, + too-many-locals, + too-many-statements, + useless-object-inheritance, + relative-beyond-top-level, + raise-missing-from, + super-with-arguments, + import-outside-toplevel, + unsubscriptable-object + + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. 
See doc for all details +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio).You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=optparse.Values,sys.exit + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins + + +[BASIC] + +# Naming style matching correct argument names +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style +#argument-rgx= + +# Naming style matching correct attribute names +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style +#class-attribute-rgx= + +# Naming style matching correct class names +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming-style +#class-rgx= + +# Naming style matching correct constant names +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names +function-naming-style=snake_case + +# Regular expression matching correct function names. 
Overrides function-
# naming-style
#function-rgx=

# Good variable names which should always be accepted, separated by a comma
good-names=i,
           j,
           k,
           ex,
           Run,
           _

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# Naming style matching correct inline iteration names
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style
#inlinevar-rgx=

# Naming style matching correct method names
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style
#method-rgx=

# Naming style matching correct module names
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty

# Naming style matching correct variable names
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style
#variable-rgx=


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=100

# Maximum number of lines in a module
max-module-lines=1000

# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
               dict-separator

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes
max-spelling-suggestions=4

# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules=cherrypy,distutils,rados,rbd,cephfs + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. 
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=10 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of statements in function / method body +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/src/pybind/mgr/dashboard/CMakeLists.txt b/src/pybind/mgr/dashboard/CMakeLists.txt new file mode 100644 index 000000000..81bb9dd1b --- /dev/null +++ b/src/pybind/mgr/dashboard/CMakeLists.txt @@ -0,0 +1,23 @@ +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + DESTINATION ${CEPH_INSTALL_DATADIR}/mgr + ${mgr_module_install_excludes} + PATTERN "frontend/*" EXCLUDE + PATTERN ".*" EXCLUDE) + +if(WITH_MGR_DASHBOARD_FRONTEND) + # build from source + add_subdirectory(frontend) + if(WITH_TESTS) + include(AddCephTest) + add_tox_test(mgr-dashboard-py3 TOX_ENVS py3) + add_tox_test(mgr-dashboard-lint TOX_ENVS lint) + add_tox_test(mgr-dashboard-check TOX_ENVS check) + add_tox_test(mgr-dashboard-openapi TOX_ENVS openapi-check) + endif() +else() + # prebuilt + install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/frontend/dist + DESTINATION ${CEPH_INSTALL_DATADIR}/mgr/dashboard/frontend) + install(FILES frontend/package.json + DESTINATION ${CEPH_INSTALL_DATADIR}/mgr/dashboard/frontend) +endif() diff --git a/src/pybind/mgr/dashboard/HACKING.rst b/src/pybind/mgr/dashboard/HACKING.rst new file mode 100644 index 000000000..39c3d6744 --- /dev/null +++ b/src/pybind/mgr/dashboard/HACKING.rst @@ -0,0 +1,10 @@ +Ceph Dashboard Developer Documentation +====================================== + +Note: The content of this file has been moved into the Ceph Developer Guide. + +If you're interested in helping with the development of the dashboard, please +see ``/doc/dev/developer_guide/dash_devel.rst`` or the `online version +`_ for +details on how to set up a development environment and other development-related +topics. 
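The `mgr cli_benchmark` command added earlier in this patch (src/pybind/mgr/cli_api/module.py) times each call of a target function across a ThreadPoolExecutor and reports min/avg/max latency. The following is a minimal, self-contained sketch of that timing pattern, assuming an ordinary callable in place of a mgr method; the names `time_call` and `benchmark` and the JSON round-trip workload are illustrative, not part of the module:

import concurrent.futures
import json
import time


def time_call(func, *args):
    # one timed invocation; a monotonic clock avoids wall-clock jumps
    start = time.monotonic()
    func(*args)
    return time.monotonic() - start


def benchmark(func, iterations, threads, *args):
    if not (iterations and threads):
        raise ValueError("iterations and threads must be greater than 0")
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
        # schedule one timed call per iteration across the pool
        results = list(executor.map(lambda _: time_call(func, *args),
                                    range(iterations)))
    return {"avg": sum(results) / len(results),
            "max": max(results),
            "min": min(results)}


if __name__ == '__main__':
    # e.g. benchmark a trivial JSON round-trip, 100 calls on 4 threads
    stats = benchmark(lambda: json.loads(json.dumps({"a": 1})), 100, 4)
    print(json.dumps(stats, sort_keys=True))
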
diff --git a/src/pybind/mgr/dashboard/README.rst b/src/pybind/mgr/dashboard/README.rst
new file mode 100644
index 000000000..623ba2528
--- /dev/null
+++ b/src/pybind/mgr/dashboard/README.rst
@@ -0,0 +1,35 @@
+Ceph Dashboard
+==============
+
+Overview
+--------
+
+The Ceph Dashboard is a built-in web-based Ceph management and monitoring
+application to administer various aspects and objects of the cluster. It is
+implemented as a Ceph Manager module.
+
+Enabling and Starting the Dashboard
+-----------------------------------
+
+If you want to start the dashboard from within a development environment, you
+need to have built Ceph (see the toplevel ``README.md`` file and the `developer
+documentation `_ for
+details on how to accomplish this).
+
+If you use the ``vstart.sh`` script to start up your development cluster, it
+will configure and enable the dashboard automatically. The URL and login
+credentials are displayed when the script finishes.
+
+Please see the `Ceph Dashboard documentation
+`_ for details on how to
+enable and configure the dashboard manually and how to configure other settings,
+e.g. access to the Ceph object gateway.
+
+Working on the Dashboard Code
+-----------------------------
+
+If you're interested in helping with the development of the dashboard, please
+see ``/doc/dev/developer_guide/dash_devel.rst`` or the `online version
+`_ for
+details on how to set up a development environment and other development-related
+topics.
diff --git a/src/pybind/mgr/dashboard/__init__.py b/src/pybind/mgr/dashboard/__init__.py
new file mode 100644
index 000000000..d2eab9751
--- /dev/null
+++ b/src/pybind/mgr/dashboard/__init__.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=wrong-import-position,global-statement,protected-access
+"""
+ceph dashboard module
+"""
+
+import os
+
+import cherrypy
+
+if 'COVERAGE_ENABLED' in os.environ:
+    import coverage  # pylint: disable=import-error
+    __cov = coverage.Coverage(config_file="{}/.coveragerc".format(os.path.dirname(__file__)),
+                              data_suffix=True)
+    __cov.start()
+    cherrypy.engine.subscribe('after_request', __cov.save)
+    cherrypy.engine.subscribe('stop', __cov.stop)
+
+if 'UNITTEST' not in os.environ:
+    class _ModuleProxy(object):
+        def __init__(self):
+            self._mgr = None
+
+        def init(self, module_inst):
+            self._mgr = module_inst
+
+        def __getattr__(self, item):
+            if self._mgr is None:
+                raise AttributeError("global manager module instance not initialized")
+            return getattr(self._mgr, item)
+
+    mgr = _ModuleProxy()
+
+else:
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    logging.root.handlers[0].setLevel(logging.DEBUG)
+    import sys
+
+    # Used to allow the running of a tox-based yml doc generator from the dashboard directory
+    if os.path.abspath(sys.path[0]) == os.getcwd():
+        sys.path.pop(0)
+
+    from tests import mock  # type: ignore
+
+    mgr = mock.Mock()
+    mgr.get_frontend_path.return_value = os.path.abspath(os.path.join(
+        os.path.dirname(__file__),
+        'frontend/dist'))
+
+    import rbd
+
+    # Api tests do not mock rbd as opposed to dashboard unit tests. Both
+    # use UNITTEST env variable.
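+    # When rbd is mocked, its module-level constants are Mock objects; the
+    # assignments below pin the mirror-mode constants to their real integer
+    # values so comparisons against them behave in unit tests.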
+ if isinstance(rbd, mock.Mock): + rbd.RBD_MIRROR_IMAGE_MODE_JOURNAL = 0 + rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT = 1 + +# DO NOT REMOVE: required for ceph-mgr to load a module +from .module import Module, StandbyModule # noqa: F401 diff --git a/src/pybind/mgr/dashboard/api/__init__.py b/src/pybind/mgr/dashboard/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/pybind/mgr/dashboard/api/doc.py b/src/pybind/mgr/dashboard/api/doc.py new file mode 100644 index 000000000..172d59d0a --- /dev/null +++ b/src/pybind/mgr/dashboard/api/doc.py @@ -0,0 +1,53 @@ +from enum import Enum +from typing import Any, Dict, List, Optional + + +class SchemaType(Enum): + """ + Representation of the type property of a schema object: + http://spec.openapis.org/oas/v3.0.3.html#schema-object + """ + ARRAY = 'array' + BOOLEAN = 'boolean' + INTEGER = 'integer' + NUMBER = 'number' + OBJECT = 'object' + STRING = 'string' + + def __str__(self): + return str(self.value) + + +class Schema: + """ + Representation of a schema object: + http://spec.openapis.org/oas/v3.0.3.html#schema-object + """ + + def __init__(self, schema_type: SchemaType = SchemaType.OBJECT, + properties: Optional[Dict] = None, required: Optional[List] = None): + self._type = schema_type + self._properties = properties if properties else {} + self._required = required if required else [] + + def as_dict(self) -> Dict[str, Any]: + schema: Dict[str, Any] = {'type': str(self._type)} + + if self._type == SchemaType.ARRAY: + items = Schema(properties=self._properties) + schema['items'] = items.as_dict() + else: + schema['properties'] = self._properties + + if self._required: + schema['required'] = self._required + + return schema + + +class SchemaInput: + """ + Simple DTO to transfer data in a structured manner for creating a schema object. + """ + type: SchemaType + params: List[Any] diff --git a/src/pybind/mgr/dashboard/awsauth.py b/src/pybind/mgr/dashboard/awsauth.py new file mode 100644 index 000000000..285a2c088 --- /dev/null +++ b/src/pybind/mgr/dashboard/awsauth.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# pylint: disable-all +# +# Copyright (c) 2012-2013 Paul Tax All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# 3. Neither the name of Infrae nor the names of its contributors may +# be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INFRAE OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import hmac +from base64 import encodebytes as encodestring +from email.utils import formatdate +from hashlib import sha1 as sha +from urllib.parse import unquote, urlparse + +from requests.auth import AuthBase + + +class S3Auth(AuthBase): + + """Attaches AWS Authentication to the given Request object.""" + + service_base_url = 's3.amazonaws.com' + # List of Query String Arguments of Interest + special_params = [ + 'acl', 'location', 'logging', 'partNumber', 'policy', 'requestPayment', + 'torrent', 'versioning', 'versionId', 'versions', 'website', 'uploads', + 'uploadId', 'response-content-type', 'response-content-language', + 'response-expires', 'response-cache-control', 'delete', 'lifecycle', + 'response-content-disposition', 'response-content-encoding', 'tagging', + 'notification', 'cors' + ] + + def __init__(self, access_key, secret_key, service_url=None): + if service_url: + self.service_base_url = service_url + self.access_key = str(access_key) + self.secret_key = str(secret_key) + + def __call__(self, r): + # Create date header if it is not created yet. + if 'date' not in r.headers and 'x-amz-date' not in r.headers: + r.headers['date'] = formatdate( + timeval=None, + localtime=False, + usegmt=True) + signature = self.get_signature(r) + signature = signature.decode('utf-8') + r.headers['Authorization'] = 'AWS %s:%s' % (self.access_key, signature) + return r + + def get_signature(self, r): + canonical_string = self.get_canonical_string( + r.url, r.headers, r.method) + key = self.secret_key.encode('utf-8') + msg = canonical_string.encode('utf-8') + h = hmac.new(key, msg, digestmod=sha) + return encodestring(h.digest()).strip() + + def get_interesting_headers(self, headers): + interesting_headers = { + 'content-md5': '', + 'content-type': '', + 'date': ''} + for key in headers: + lk = key.lower() + try: + if isinstance(lk, bytes): + lk = lk.decode('utf-8') + except UnicodeDecodeError: + pass + if headers[key] and (lk in interesting_headers.keys() + or lk.startswith('x-amz-')): + interesting_headers[lk] = headers[key].strip() + + # If x-amz-date is used it supersedes the date header. + if 'x-amz-date' in interesting_headers: + interesting_headers['date'] = '' + return interesting_headers + + def get_canonical_string(self, url, headers, method): + parsedurl = urlparse(url) + objectkey = parsedurl.path[1:] + query_args = sorted(parsedurl.query.split('&')) + + bucket = parsedurl.netloc[:-len(self.service_base_url)] + if len(bucket) > 1: + # remove last dot + bucket = bucket[:-1] + + interesting_headers = self.get_interesting_headers(headers) + + buf = '%s\n' % method + for key in sorted(interesting_headers.keys()): + val = interesting_headers[key] + if key.startswith('x-amz-'): + buf += '%s:%s\n' % (key, val) + else: + buf += '%s\n' % val + + # append the bucket if it exists + if bucket != '': + buf += '/%s' % bucket + + # add the objectkey. 
even if it doesn't exist, add the slash + buf += '/%s' % objectkey + + params_found = False + + # handle special query string arguments + for q in query_args: + k = q.split('=')[0] + if k in self.special_params: + buf += '&' if params_found else '?' + params_found = True + + try: + k, v = q.split('=', 1) + + except ValueError: + buf += q + + else: + # Riak CS multipart upload ids look like this, `TFDSheOgTxC2Tsh1qVK73A==`, + # is should be escaped to be included as part of a query string. + # + # A requests mp upload part request may look like + # resp = requests.put( + # 'https://url_here', + # params={ + # 'partNumber': 1, + # 'uploadId': 'TFDSheOgTxC2Tsh1qVK73A==' + # }, + # data='some data', + # auth=S3Auth('access_key', 'secret_key') + # ) + # + # Requests automatically escapes the values in the `params` dict, so now + # our uploadId is `TFDSheOgTxC2Tsh1qVK73A%3D%3D`, + # if we sign the request with the encoded value the signature will + # not be valid, we'll get 403 Access Denied. + # So we unquote, this is no-op if the value isn't encoded. + buf += '{key}={value}'.format(key=k, value=unquote(v)) + + return buf diff --git a/src/pybind/mgr/dashboard/cherrypy_backports.py b/src/pybind/mgr/dashboard/cherrypy_backports.py new file mode 100644 index 000000000..8871004fe --- /dev/null +++ b/src/pybind/mgr/dashboard/cherrypy_backports.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +""" +Copyright © 2004-2019, CherryPy Team (team@cherrypy.org) + +All rights reserved. + +* * * + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of CherryPy nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +from pkg_resources import parse_version + +# The SSL code in CherryPy 3.5.0 is buggy. It was fixed long ago, +# but 3.5.0 is still shipping in major linux distributions +# (Fedora 27, Ubuntu Xenial), so we must monkey patch it to get SSL working. + + +def patch_http_connection_init(v): + # It was fixed in 3.7.0. Exact lower bound version is probably earlier, + # but 3.5.0 is what this monkey patch is tested on. 
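+    # As with the other helpers in this file, the patch is version-gated:
+    # outside the affected range the function is a no-op.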
+ if parse_version("3.5.0") <= v < parse_version("3.7.0"): + from cherrypy.wsgiserver.wsgiserver2 import CP_fileobject, HTTPConnection + + def fixed_init(hc_self, server, sock, makefile=CP_fileobject): + hc_self.server = server + hc_self.socket = sock + hc_self.rfile = makefile(sock, "rb", hc_self.rbufsize) + hc_self.wfile = makefile(sock, "wb", hc_self.wbufsize) + hc_self.requests_seen = 0 + + HTTPConnection.__init__ = fixed_init + + +# When the CherryPy server in 3.2.2 (and later) starts it attempts to verify +# that the ports its listening on are in fact bound. When using the any address +# "::" it tries both ipv4 and ipv6, and in some environments (e.g. kubernetes) +# ipv6 isn't yet configured / supported and CherryPy throws an uncaught +# exception. +def skip_wait_for_occupied_port(v): + # the issue was fixed in 3.2.3. it's present in 3.2.2 (current version on + # centos:7) and back to at least 3.0.0. + if parse_version("3.1.2") <= v < parse_version("3.2.3"): + # https://github.com/cherrypy/cherrypy/issues/1100 + from cherrypy.process import servers + servers.wait_for_occupied_port = lambda host, port: None + + +# cherrypy.wsgiserver was extracted wsgiserver into cheroot in cherrypy v9.0.0 +def patch_builtin_ssl_wrap(v, new_wrap): + if v < parse_version("9.0.0"): + from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter as builtin_ssl + else: + from cheroot.ssl.builtin import BuiltinSSLAdapter as builtin_ssl # type: ignore + builtin_ssl.wrap = new_wrap(builtin_ssl.wrap) + + +def accept_exceptions_from_builtin_ssl(v): + # the fix was included by cheroot v5.2.0, which was included by cherrypy + # 10.2.0. + if v < parse_version("10.2.0"): + # see https://github.com/cherrypy/cheroot/pull/4 + import ssl + + def accept_ssl_errors(func): + def wrapper(self, sock): + try: + return func(self, sock) + except ssl.SSLError as e: + if e.errno == ssl.SSL_ERROR_SSL: + # Check if it's one of the known errors + # Errors that are caught by PyOpenSSL, but thrown by + # built-in ssl + _block_errors = ('unknown protocol', 'unknown ca', 'unknown_ca', + 'unknown error', + 'https proxy request', 'inappropriate fallback', + 'wrong version number', + 'no shared cipher', 'certificate unknown', + 'ccs received early', + 'certificate verify failed', # client cert w/o trusted CA + 'version too low', # caused by SSL3 connections + 'unsupported protocol', # caused by TLS1 connections + 'sslv3 alert bad certificate') + for error_text in _block_errors: + if error_text in e.args[1].lower(): + # Accepted error, let's pass + return None, {} + raise + return wrapper + patch_builtin_ssl_wrap(v, accept_ssl_errors) + + +def accept_socket_error_0(v): + # see https://github.com/cherrypy/cherrypy/issues/1618 + try: + import cheroot + cheroot_version = parse_version(cheroot.__version__) + except ImportError: + pass + + if v < parse_version("9.0.0") or cheroot_version < parse_version("6.5.5"): + generic_socket_error = OSError + + def accept_socket_error_0(func): + def wrapper(self, sock): + try: + return func(self, sock) + except generic_socket_error as e: + """It is unclear why exactly this happens. + + It's reproducible only with openssl>1.0 and stdlib ``ssl`` wrapper. + In CherryPy it's triggered by Checker plugin, which connects + to the app listening to the socket port in TLS mode via plain + HTTP during startup (from the same process). 
+ + Ref: https://github.com/cherrypy/cherrypy/issues/1618 + """ + import ssl + is_error0 = e.args == (0, 'Error') + IS_ABOVE_OPENSSL10 = ssl.OPENSSL_VERSION_INFO >= (1, 1) + del ssl + if is_error0 and IS_ABOVE_OPENSSL10: + return None, {} + raise + return wrapper + patch_builtin_ssl_wrap(v, accept_socket_error_0) + + +def patch_request_unique_id(v): + """ + Older versions of cherrypy don't include request.unique_id field (a lazily + calculated UUID4). + + Monkey-patching is preferred over alternatives as inheritance, as it'd break + type checks (cherrypy/lib/cgtools.py: `isinstance(obj, _cprequest.Request)`) + """ + if v < parse_version('11.1.0'): + import uuid + from functools import update_wrapper + + from cherrypy._cprequest import Request + + class LazyUUID4(object): + def __str__(self): + """Return UUID4 and keep it for future calls.""" + return str(self.uuid4) + + @property + def uuid4(self): + """Provide unique id on per-request basis using UUID4. + It's evaluated lazily on render. + """ + try: + self._uuid4 # type: ignore + except AttributeError: + # evaluate on first access + self._uuid4 = uuid.uuid4() + + return self._uuid4 + + old_init = Request.__init__ + + def init_with_unique_id(self, *args, **kwargs): + old_init(self, *args, **kwargs) + self.unique_id = LazyUUID4() + + Request.__init__ = update_wrapper(init_with_unique_id, old_init) + + +def patch_cherrypy(v): + ver = parse_version(v) + patch_http_connection_init(ver) + skip_wait_for_occupied_port(ver) + accept_exceptions_from_builtin_ssl(ver) + accept_socket_error_0(ver) + patch_request_unique_id(ver) diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh new file mode 100755 index 000000000..1c2c4b3cd --- /dev/null +++ b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -x + +export PATH=/root/bin:$PATH +mkdir /root/bin + +export CEPHADM_IMAGE='quay.ceph.io/ceph-ci/ceph:reef' + +CEPHADM="/root/bin/cephadm" + +/mnt/{{ ceph_dev_folder }}/src/cephadm/build.sh $CEPHADM +mkdir -p /etc/ceph +mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}') + +bootstrap_extra_options='--allow-fqdn-hostname --dashboard-password-noupdate' + +# commenting the below lines. Uncomment it when any extra options are +# needed for the bootstrap. 
+# bootstrap_extra_options_not_expanded='' +# {% if expanded_cluster is not defined %} +# bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}" +# {% endif %} + +$CEPHADM bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --shared_ceph_folder /mnt/{{ ceph_dev_folder }} ${bootstrap_extra_options} + +fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}') +cephadm_shell="$CEPHADM shell --fsid ${fsid} -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring" + +{% for number in range(1, nodes) %} + ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.10{{ number }} + {% if expanded_cluster is defined %} + ${cephadm_shell} ceph orch host add {{ prefix }}-node-0{{ number }} + {% endif %} +{% endfor %} + +{% if expanded_cluster is defined %} + ${cephadm_shell} ceph orch apply osd --all-available-devices +{% endif %} diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml new file mode 100755 index 000000000..a334fbad5 --- /dev/null +++ b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml @@ -0,0 +1,45 @@ +parameters: + nodes: 4 + node_ip_offset: 100 + pool: ceph-dashboard + network: ceph-dashboard + gateway: 192.168.100.1 + netmask: 255.255.255.0 + prefix: ceph + numcpus: 1 + memory: 2048 + image: fedora36 + notify: false + admin_password: password + disks: + - 15 + - 5 + - 5 + +{% for number in range(0, nodes) %} +{{ prefix }}-node-0{{ number }}: + image: {{ image }} + numcpus: {{ numcpus }} + memory: {{ memory }} + reserveip: true + reservedns: true + sharedkey: true + nets: + - name: {{ network }} + ip: 192.168.100.{{ node_ip_offset + number }} + gateway: {{ gateway }} + mask: {{ netmask }} + dns: {{ gateway }} + disks: {{ disks }} + pool: {{ pool }} + sharedfolders: [{{ ceph_dev_folder }}] + files: + - bootstrap-cluster.sh + cmds: + - dnf -y install python3 chrony lvm2 podman + - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config + - setenforce 0 + {% if number == 0 %} + - bash /root/bootstrap-cluster.sh + {% endif %} +{% endfor %} diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh new file mode 100755 index 000000000..a48f759f5 --- /dev/null +++ b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -ex + +: ${CYPRESS_BASE_URL:=''} +: ${CYPRESS_LOGIN_USER:='admin'} +: ${CYPRESS_LOGIN_PWD:='password'} +: ${CYPRESS_ARGS:=''} +: ${DASHBOARD_PORT:='8443'} + +get_vm_ip () { + local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') + echo -n $ip +} + +if [[ -n "${JENKINS_HOME}" || (-z "${CYPRESS_BASE_URL}" && -z "$(get_vm_ip ceph-node-00)") ]]; then + . 
"$(dirname $0)"/start-cluster.sh + + CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}" +fi + +export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD + +cypress_run () { + local specs="$1" + local timeout="$2" + local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs},chromeWebSecurity=false" + if [[ -n "$timeout" ]]; then + override_config="${override_config},defaultCommandTimeout=${timeout}" + fi + + rm -f cypress/reports/results-*.xml || true + + npx --no-install cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config" +} + +: ${CEPH_DEV_FOLDER:=${PWD}} + +cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend + +kcli ssh -u root ceph-node-00 'cephadm shell "ceph config set mgr mgr/prometheus/exclude_perf_counters false"' + +# check if the prometheus daemon is running +# before starting the e2e tests + +PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running') +while [[ $PROMETHEUS_RUNNING_COUNT -lt 1 ]]; do + PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running') +done + +# grafana ip address is set to the fqdn by default. +# kcli is not working with that, so setting the IP manually. +kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-alertmanager-api-host http://192.168.100.100:9093"' +kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-prometheus-api-host http://192.168.100.100:9095"' +kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-grafana-api-url https://192.168.100.100:3000"' +kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch apply node-exporter --placement 'count:2'"' + +cypress_run ["cypress/e2e/orchestrator/workflow/*.feature","cypress/e2e/orchestrator/workflow/*-spec.ts"] +cypress_run "cypress/e2e/orchestrator/grafana/*.feature" diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh new file mode 100755 index 000000000..65cb78a45 --- /dev/null +++ b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +set -eEx + +on_error() { + set +x + if [ "$1" != "0" ]; then + echo "ERROR $1 thrown on line $2" + echo + echo "Collecting info..." + echo + echo "Saving MGR logs:" + echo + mkdir -p ${CEPH_DEV_FOLDER}/logs + kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log + for vm_id in {0..3} + do + local vm="ceph-node-0${vm_id}" + echo "Saving journalctl from VM ${vm}:" + echo + kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true + echo "Saving container logs:" + echo + kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true + done + echo "TEST FAILED." + fi +} + +trap 'on_error $? $LINENO' ERR + +sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true + +: ${CEPH_DEV_FOLDER:=${PWD}} +EXTRA_PARAMS='' +DEV_MODE='' +# Check script args/options. 
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
new file mode 100755
index 000000000..65cb78a45
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+set -eEx
+
+on_error() {
+  set +x
+  if [ "$1" != "0" ]; then
+    echo "ERROR $1 thrown on line $2"
+    echo
+    echo "Collecting info..."
+    echo
+    echo "Saving MGR logs:"
+    echo
+    mkdir -p ${CEPH_DEV_FOLDER}/logs
+    kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
+    for vm_id in {0..3}
+    do
+      local vm="ceph-node-0${vm_id}"
+      echo "Saving journalctl from VM ${vm}:"
+      echo
+      kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
+      echo "Saving container logs:"
+      echo
+      kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
+    done
+    echo "TEST FAILED."
+  fi
+}
+
+trap 'on_error $? $LINENO' ERR
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+  shift
+  case "$arg" in
+    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
+    "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
+  esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+export NG_CLI_ANALYTICS=false
+if [[ -n "$JENKINS_HOME" ]]; then
+  npm cache clean --force
+fi
+npm ci
+FRONTEND_BUILD_OPTS='--configuration=production'
+if [[ -n "${DEV_MODE}" ]]; then
+  FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora36'}
+: ${VM_IMAGE_URL:='https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+    ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
+  sleep ${DASHBOARD_CHECK_INTERVAL}
+  kcli list vm
+  if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+    kcli ssh -u root -- ceph-node-00 'podman ps -a'
+    kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
+  fi
+  kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
diff --git a/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
new file mode 100644
index 000000000..d37337b40
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=F0401
+"""
+This script does:
+* Scan through Angular html templates and extract <cd-grafana> tags
+* Check if every tag has a corresponding Grafana dashboard by `uid`
+
+Usage:
+    python
+
\ No newline at end of file
diff --git a/src/pybind/mgr/dashboard/frontend/dist/en-US/main.a87f559bb03ca0fb.js b/src/pybind/mgr/dashboard/frontend/dist/en-US/main.a87f559bb03ca0fb.js
new file mode 100644
index 000000000..feac3d82e
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/dist/en-US/main.a87f559bb03ca0fb.js
@@ -0,0 +1,3 @@
+globalThis.$localize=Object.assign(globalThis.$localize || {},{locale:"en-US"});
+"use strict";(function(global){global.ng=global.ng||{};global.ng.common=global.ng.common||{};global.ng.common.locales=global.ng.common.locales||{};const u=undefined;function plural(val){const n=val,i=Math.floor(Math.abs(val)),v=val.toString().replace(/^[^.]*\.?/,"").length;if(i===1&&v===0)return 1;return 5}global.ng.common.locales["en"]=["en",[["a","p"],["AM","PM"],u],[["AM","PM"],u,u],[["S","M","T","W","T","F","S"],["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],["Su","Mo","Tu","We","Th","Fr","Sa"]],u,[["J","F","M","A","M","J","J","A","S","O","N","D"],["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],["January","February","March","April","May","June","July","August","September","October","November","December"]],u,[["B","A"],["BC","AD"],["Before Christ","Anno Domini"]],0,[6,0],["M/d/yy","MMM d, y","MMMM d, y","EEEE, MMMM d, y"],["h:mm a","h:mm:ss a","h:mm:ss a z","h:mm:ss a zzzz"],["{1}, {0}",u,"{1} 'at'
{0}",u],[".",",",";","%","+","-","E","\xD7","\u2030","\u221E","NaN",":"],["#,##0.###","#,##0%","\xA4#,##0.00","#E0"],"USD","$","US Dollar",{},"ltr",plural,[[["mi","n","in the morning","in the afternoon","in the evening","at night"],["midnight","noon","in the morning","in the afternoon","in the evening","at night"],u],[["midnight","noon","morning","afternoon","evening","night"],u,u],["00:00","12:00",["06:00","12:00"],["12:00","18:00"],["18:00","21:00"],["21:00","06:00"]]]]})(typeof globalThis!=="undefined"&&globalThis||typeof global!=="undefined"&&global||typeof window!=="undefined"&&window);; +(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[179],{43155:(E,C)=>{"use strict";C.N=void 0;var r=/^([^\w]*)(javascript|data|vbscript)/im,a=/&#(\w+)(^\w|;)?/g,c=/[\u0000-\u001F\u007F-\u009F\u2000-\u200D\uFEFF]/gim,u=/^([^:]+):/gm,e=[".","/"];C.N=function T(M){var w=function m(M){return M.replace(a,function(w,D){return String.fromCharCode(D)})}(M||"").replace(c,"").trim();if(!w)return"about:blank";if(function f(M){return e.indexOf(M[0])>-1}(w))return w;var D=w.match(u);return D&&r.test(D[0])?"about:blank":w}},62946:(E,C,s)=>{"use strict";s.d(C,{iM:()=>Tf,qr:()=>b1,xc:()=>Av});var r=s(64537),a=s(88692),c=function(L,q){return(c=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(j,Ae){j.__proto__=Ae}||function(j,Ae){for(var St in Ae)Ae.hasOwnProperty(St)&&(j[St]=Ae[St])})(L,q)};function u(L,q){function j(){this.constructor=L}c(L,q),L.prototype=null===q?Object.create(q):(j.prototype=q.prototype,new j)}var e=function(){return e=Object.assign||function(q){for(var j,Ae=1,St=arguments.length;Ae0)&&!(St=Ae.next()).done;)Kt.push(St.value)}catch(Br){ur={error:Br}}finally{try{St&&!St.done&&(j=Ae.return)&&j.call(Ae)}finally{if(ur)throw ur.error}}return Kt}function m(){for(var L=[],q=0;q2&&Fe("box");var j=pr(q);return new ji(L,Eo(j),j.name,!0,j.equals)},shallowBox:function(L,q){return arguments.length>2&&Fe("shallowBox"),jt.box(L,{name:q,deep:!1})},array:function(L,q){arguments.length>2&&Fe("array");var j=pr(q);return new sc(L,Eo(j),j.name)},shallowArray:function(L,q){return arguments.length>2&&Fe("shallowArray"),jt.array(L,{name:q,deep:!1})},map:function(L,q){arguments.length>2&&Fe("map");var j=pr(q);return new kl(L,Eo(j),j.name)},shallowMap:function(L,q){return arguments.length>2&&Fe("shallowMap"),jt.map(L,{name:q,deep:!1})},set:function(L,q){arguments.length>2&&Fe("set");var j=pr(q);return new Ee(L,Eo(j),j.name)},object:function(L,q,j){return"string"==typeof arguments[1]&&Fe("object"),function qu(L,q,j,Ae){var Kt=(Ae=pr(Ae)).defaultDecorator||(!1===Ae.deep?qr:po);Tt(L),Gt(L,Ae.name,Kt.enhancer),Is();try{for(var St in q){var ur=Object.getOwnPropertyDescriptor(q,St),Ii=(j&&St in j?j[St]:ur.get?Ie:Kt)(L,St,ur,!0);Ii&&Object.defineProperty(L,St,Ii)}}finally{la()}return L}({},L,q,pr(j))},shallowObject:function(L,q){return"string"==typeof arguments[1]&&Fe("shallowObject"),jt.object(L,{},{name:q,deep:!1})},ref:qr,shallow:$i,deep:po,struct:Hi},jt=function Dn(L,q,j){if("string"==typeof arguments[1])return po.apply(null,arguments);if(El(L))return L;var Ae=fe(L)?jt.object(L,q,j):Array.isArray(L)?jt.array(L,q):Le(L)?jt.map(L,q):Pt(L)?jt.set(L,q):L;if(Ae!==L)return Ae;$(!1)};function Fe(L){$("Expected one or two arguments to observable."+L+". 
Did you accidentally try to use observable."+L+" as decorator?")}Object.keys(Hn).forEach(function(L){return jt[L]=Hn[L]});var Ie=wn(!1,function(L,q,j,Ae,St){!function kn(L,q,j){var Ae=Gt(L);j.name=Ae.name+"."+q,j.context=L,Ae.values[q]=new Po(j),Object.defineProperty(L,q,function Go(L){return Xr[L]||(Xr[L]={configurable:Bn.computedConfigurable,enumerable:!1,get:function(){return Rr(this).read(this,L)},set:function(q){Rr(this).write(this,L,q)}})}(q))}(L,q,e({get:j.get,set:j.set},St[0]||{}))}),et=Ie({equals:jr.structural}),ze=function(q,j,Ae){if("string"==typeof j||null!==q&&"object"==typeof q&&1===arguments.length)return Ie.apply(null,arguments);var St="object"==typeof j?j:{};return St.get=q,St.set="function"==typeof j?j:St.set,St.name=St.name||q.name||"",new Po(St)};ze.struct=et;var an=(()=>{return(L=an||(an={}))[L.NOT_TRACKING=-1]="NOT_TRACKING",L[L.UP_TO_DATE=0]="UP_TO_DATE",L[L.POSSIBLY_STALE=1]="POSSIBLY_STALE",L[L.STALE=2]="STALE",an;var L})(),lt=(()=>{return(L=lt||(lt={}))[L.NONE=0]="NONE",L[L.LOG=1]="LOG",L[L.BREAK=2]="BREAK",lt;var L})(),Rt=function L(q){this.cause=q};function Pe(L){return L instanceof Rt}function qn(L){switch(L.dependenciesState){case an.UP_TO_DATE:return!1;case an.NOT_TRACKING:case an.STALE:return!0;case an.POSSIBLY_STALE:for(var q=dn(),j=L.observing,Ae=j.length,St=0;St0;Bn.computationDepth>0&&q&&$(!1),!Bn.allowStateChanges&&(q||"strict"===Bn.enforceActions)&&$(!1)}function Pr(L,q,j){var Ae=Ot(!0);wr(L),L.newObserving=new Array(L.observing.length+100),L.unboundDepsCount=0,L.runId=++Bn.runId;var Kt,St=Bn.trackingDerivation;if(Bn.trackingDerivation=L,!0===Bn.disableErrorBoundaries)Kt=q.call(j);else try{Kt=q.call(j)}catch(ur){Kt=new Rt(ur)}return Bn.trackingDerivation=St,function Zn(L){for(var q=L.observing,j=L.observing=L.newObserving,Ae=an.UP_TO_DATE,St=0,Kt=L.unboundDepsCount,ur=0;urAe&&(Ae=Br.dependenciesState);for(j.length=St,L.newObserving=null,Kt=q.length;Kt--;)0===(Br=q[Kt]).diffValue&&ss(Br,L),Br.diffValue=0;for(;St--;){var Br;1===(Br=j[St]).diffValue&&(Br.diffValue=0,jo(Br,L))}Ae!==an.UP_TO_DATE&&(L.dependenciesState=Ae,L.onBecomeStale())}(L),mn(Ae),Kt}function nr(L){var q=L.observing;L.observing=[];for(var j=q.length;j--;)ss(q[j],L);L.dependenciesState=an.NOT_TRACKING}function Zt(L){var q=dn(),j=L();return Ge(q),j}function dn(){var L=Bn.trackingDerivation;return Bn.trackingDerivation=null,L}function Ge(L){Bn.trackingDerivation=L}function Ot(L){var q=Bn.allowStateReads;return Bn.allowStateReads=L,q}function mn(L){Bn.allowStateReads=L}function wr(L){if(L.dependenciesState!==an.UP_TO_DATE){L.dependenciesState=an.UP_TO_DATE;for(var q=L.observing,j=q.length;j--;)q[j].lowestObserverState=an.UP_TO_DATE}}var Ti=0,Ci=1;function Ai(L,q){var j=function(){return function Ko(L,q,j,Ae){var St=function _s(L,q,j){var Ae=fa()&&!!L,St=0;if(Ae){St=Date.now();var Kt=j&&j.length||0,ur=new Array(Kt);if(Kt>0)for(var Br=0;Br0&&!L.__mobxGlobals&&(Vt=!1),L.__mobxGlobals&&L.__mobxGlobals.version!==(new ro).version&&(Vt=!1),Vt?L.__mobxGlobals?(L.__mobxInstanceCount+=1,L.__mobxGlobals.UNCHANGED||(L.__mobxGlobals.UNCHANGED={}),L.__mobxGlobals):(L.__mobxInstanceCount=1,L.__mobxGlobals=new ro):(setTimeout(function(){$("There are multiple, different versions of MobX active. 
Make sure MobX is loaded only once or use `configure({ isolateGlobalState: true })`")},1),new ro));function jo(L,q){var j=L.observers.length;j&&(L.observersIndexes[q.__mapid]=j),L.observers[j]=q,L.lowestObserverState>q.dependenciesState&&(L.lowestObserverState=q.dependenciesState)}function ss(L,q){if(1===L.observers.length)L.observers.length=0,gs(L);else{var j=L.observers,Ae=L.observersIndexes,St=j.pop();if(St!==q){var Kt=Ae[q.__mapid]||0;Kt?Ae[St.__mapid]=Kt:delete Ae[St.__mapid],j[Kt]=St}delete Ae[q.__mapid]}}function gs(L){!1===L.isPendingUnobservation&&(L.isPendingUnobservation=!0,Bn.pendingUnobservations.push(L))}function Is(){Bn.inBatch++}function la(){if(0==--Bn.inBatch){hs();for(var L=Bn.pendingUnobservations,q=0;q0&&gs(L),!1)}function da(L,q){if(console.log("[mobx.trace] '"+L.name+"' is invalidated due to a change in: '"+q.name+"'"),L.isTracing===lt.BREAK){var j=[];$a(function Ol(L,q){return Kc(Fr(L,q))}(L),j,1),new Function("debugger;\n/*\nTracing '"+L.name+"'\n\nYou are entering this break point because derivation '"+L.name+"' is being traced and '"+q.name+"' is now forcing it to update.\nJust follow the stacktrace you should now see in the devtools to see precisely what piece of your code is causing this update\nThe stackframe you are looking for is at least ~6-8 stack-frames up.\n\n"+(L instanceof Po?L.derivation.toString().replace(/[*]\//g,"/"):"")+"\n\nThe dependencies for this derivation are:\n\n"+j.join("\n")+"\n*/\n ")()}}function $a(L,q,j){q.length>=1e3?q.push("(and many more)"):(q.push(""+new Array(j).join("\t")+L.name),L.dependencies&&L.dependencies.forEach(function(Ae){return $a(Ae,q,j+1)}))}var Rl=function(){function L(q,j,Ae,St){void 0===q&&(q="Reaction@"+W()),void 0===St&&(St=!1),this.name=q,this.onInvalidate=j,this.errorHandler=Ae,this.requiresObservable=St,this.observing=[],this.newObserving=[],this.dependenciesState=an.NOT_TRACKING,this.diffValue=0,this.runId=0,this.unboundDepsCount=0,this.__mapid="#"+W(),this.isDisposed=!1,this._isScheduled=!1,this._isTrackPending=!1,this._isRunning=!1,this.isTracing=lt.NONE}return L.prototype.onBecomeStale=function(){this.schedule()},L.prototype.schedule=function(){this._isScheduled||(this._isScheduled=!0,Bn.pendingReactions.push(this),hs())},L.prototype.isScheduled=function(){return this._isScheduled},L.prototype.runReaction=function(){if(!this.isDisposed){if(Is(),this._isScheduled=!1,qn(this)){this._isTrackPending=!0;try{this.onInvalidate(),this._isTrackPending&&fa()&&Xo({name:this.name,type:"scheduled-reaction"})}catch(q){this.reportExceptionInDerivation(q)}}la()}},L.prototype.track=function(q){Is();var Ae,j=fa();j&&(Ae=Date.now(),No({name:this.name,type:"reaction"})),this._isRunning=!0;var St=Pr(this,q,void 0);this._isRunning=!1,this._isTrackPending=!1,this.isDisposed&&nr(this),Pe(St)&&this.reportExceptionInDerivation(St.cause),j&&ns({time:Date.now()-Ae}),la()},L.prototype.reportExceptionInDerivation=function(q){var j=this;if(this.errorHandler)this.errorHandler(q,this);else{if(Bn.disableErrorBoundaries)throw q;var Ae="[mobx] Encountered an uncaught exception that was thrown by a reaction or observer component, in: '"+this+"'";Bn.suppressReactionErrors?console.warn("[mobx] (error in reaction '"+this.name+"' suppressed, fix error of causing action below)"):console.error(Ae,q),fa()&&Xo({type:"error",name:this.name,message:Ae,error:""+q}),Bn.globalReactionErrorHandlers.forEach(function(St){return 
St(q,j)})}},L.prototype.dispose=function(){this.isDisposed||(this.isDisposed=!0,this._isRunning||(Is(),nr(this),la()))},L.prototype.getDisposer=function(){var q=this.dispose.bind(this);return q.$mobx=this,q},L.prototype.toString=function(){return"Reaction["+this.name+"]"},L.prototype.trace=function(q){void 0===q&&(q=!1),function gc(){for(var L=[],q=0;q0||Bn.isRunningReactions||Ts($s)}function $s(){Bn.isRunningReactions=!0;for(var L=Bn.pendingReactions,q=0;L.length>0;){++q===Ha&&(console.error("Reaction doesn't converge to a stable state after "+Ha+" iterations. Probably there is a cycle in the reactive function: "+L[0]),L.splice(0));for(var j=L.splice(0),Ae=0,St=j.length;Ae",q):2===arguments.length&&"function"==typeof j?Ai(q,j):1===arguments.length&&"string"==typeof q?io(q):!0!==St?io(j).apply(null,arguments):void(q[j]=Ai(q.name||j,Ae.value))};function gn(L,q,j){ge(L,q,Ai(q,j.bind(L)))}function vi(L,q){void 0===q&&(q=w);var St,j=q&&q.name||L.name||"Autorun@"+W();if(q.scheduler||q.delay){var Kt=Xi(q),ur=!1;St=new Rl(j,function(){ur||(ur=!0,Kt(function(){ur=!1,St.isDisposed||St.track(Br)}))},q.onError,q.requiresObservable)}else St=new Rl(j,function(){this.track(Br)},q.onError,q.requiresObservable);function Br(){L(St)}return St.schedule(),St.getDisposer()}ie.bound=function Tn(L,q,j,Ae){return!0===Ae?(gn(L,q,j.value),null):j?{configurable:!0,enumerable:!1,get:function(){return gn(this,q,j.value||j.initializer.call(this)),this[q]},set:zr}:{enumerable:!1,configurable:!0,set:function(St){gn(this,q,St)},get:function(){}}};var Bi=function(L){return L()};function Xi(L){return L.scheduler?L.scheduler:L.delay?function(q){return setTimeout(q,L.delay)}:Bi}function ws(L,q,j){void 0===j&&(j=w),"boolean"==typeof j&&(j={fireImmediately:j});var ms,Ae=j.name||"Reaction@"+W(),St=ie(Ae,j.onError?function ds(L,q){return function(){try{return q.apply(this,arguments)}catch(j){L.call(this,j)}}}(j.onError,q):q),Kt=!j.scheduler&&!j.delay,ur=Xi(j),Br=!0,Ii=!1,vs=j.compareStructural?jr.structural:j.equals||jr.default,Ks=new Rl(Ae,function(){Br||Kt?Vl():Ii||(Ii=!0,ur(Vl))},j.onError,j.requiresObservable);function Vl(){if(Ii=!1,!Ks.isDisposed){var Xu=!1;Ks.track(function(){var Fu=L(Ks);Xu=Br||!vs(ms,Fu),ms=Fu}),Br&&j.fireImmediately&&St(ms,Ks),!Br&&!0===Xu&&St(ms,Ks),Br&&(Br=!1)}}return Ks.schedule(),Ks.getDisposer()}function Js(L,q,j){return Ll("onBecomeUnobserved",L,q,j)}function Ll(L,q,j,Ae){var St="function"==typeof Ae?Fr(q,j):Fr(q),Kt="function"==typeof Ae?Ae:j,ur=St[L];return"function"!=typeof ur?$(!1):(St[L]=function(){ur.call(this),Kt.call(this)},function(){St[L]=ur})}function Kc(L){var q={name:L.name};return L.observing&&L.observing.length>0&&(q.dependencies=function ce(L){var q=[];return L.forEach(function(j){-1===q.indexOf(j)&&q.push(j)}),q}(L.observing).map(Kc)),q}function El(L){return 1!==arguments.length&&$(!1),function ua(L,q){if(null==L)return!1;if(void 0!==q){if(Gr(L)){var j=L.$mobx;return j.values&&!!j.values[q]}return!1}return Gr(L)||!!L.$mobx||xn(L)||Aa(L)||ko(L)}(L)}function Al(L,q){void 0===q&&(q=void 0),Is();try{return L.apply(q)}finally{la()}}function bt(L){return void 0!==L.interceptors&&L.interceptors.length>0}function pt(L,q){var j=L.interceptors||(L.interceptors=[]);return j.push(q),de(function(){var Ae=j.indexOf(q);-1!==Ae&&j.splice(Ae,1)})}function Je(L,q){var j=dn();try{var Ae=L.interceptors;if(Ae)for(var St=0,Kt=Ae.length;St0}function fi(L,q){var j=L.changeListeners||(L.changeListeners=[]);return j.push(q),de(function(){var Ae=j.indexOf(q);-1!==Ae&&j.splice(Ae,1)})}function To(L,q){var 
j=dn(),Ae=L.changeListeners;if(Ae){for(var St=0,Kt=(Ae=Ae.slice()).length;St0?q.map(this.dehancer):q},L.prototype.intercept=function(q){return pt(this,q)},L.prototype.observe=function(q,j){return void 0===j&&(j=!1),j&&q({object:this.array,type:"splice",index:0,added:this.values.slice(),addedCount:this.values.length,removed:[],removedCount:0}),fi(this,q)},L.prototype.getArrayLength=function(){return this.atom.reportObserved(),this.values.length},L.prototype.setArrayLength=function(q){if("number"!=typeof q||q<0)throw new Error("[mobx.array] Out of range: "+q);var j=this.values.length;if(q!==j)if(q>j){for(var Ae=new Array(q-j),St=0;St0&&q+j+1>Hs&&ec(q+j+1)},L.prototype.spliceWithArray=function(q,j,Ae){var St=this;Pn(this.atom);var Kt=this.values.length;if(void 0===q?q=0:q>Kt?q=Kt:q<0&&(q=Math.max(0,Kt+q)),j=1===arguments.length?Kt-q:null==j?0:Math.max(0,Math.min(j,Kt-q)),void 0===Ae&&(Ae=M),bt(this)){var ur=Je(this,{object:this.array,type:"splice",index:q,removedCount:j,added:Ae});if(!ur)return M;j=ur.removedCount,Ae=ur.added}Ae=0===Ae.length?Ae:Ae.map(function(ms){return St.enhancer(ms,void 0)}),this.updateArrayLength(Kt,Ae.length-j);var Ii=this.spliceItemsIntoValues(q,j,Ae);return(0!==j||0!==Ae.length)&&this.notifyArraySplice(q,Ae,Ii),this.dehanceValues(Ii)},L.prototype.spliceItemsIntoValues=function(q,j,Ae){var St;if(Ae.length<1e4)return(St=this.values).splice.apply(St,m([q,j],Ae));var Kt=this.values.slice(q,q+j);return this.values=this.values.slice(0,q).concat(Ae,this.values.slice(q+j)),Kt},L.prototype.notifyArrayChildUpdate=function(q,j,Ae){var St=!this.owned&&fa(),Kt=en(this),ur=Kt||St?{object:this.array,type:"update",index:q,newValue:j,oldValue:Ae}:null;St&&No(e({},ur,{name:this.atom.name})),this.atom.reportChanged(),Kt&&To(this,ur),St&&ns()},L.prototype.notifyArraySplice=function(q,j,Ae){var St=!this.owned&&fa(),Kt=en(this),ur=Kt||St?{object:this.array,type:"splice",index:q,removed:Ae,added:j,removedCount:Ae.length,addedCount:j.length}:null;St&&No(e({},ur,{name:this.atom.name})),this.atom.reportChanged(),Kt&&To(this,ur),St&&ns()},L}(),sc=function(L){function q(j,Ae,St,Kt){void 0===St&&(St="ObservableArray@"+W()),void 0===Kt&&(Kt=!1);var ur=L.call(this)||this,Br=new zl(St,Ae,ur,Kt);if(Et(ur,"$mobx",Br),j&&j.length){var Ii=ti(!0);ur.spliceWithArray(0,0,j),Vr(Ii)}return mi&&Object.defineProperty(Br.array,"0",hu),ur}return u(q,L),q.prototype.intercept=function(j){return this.$mobx.intercept(j)},q.prototype.observe=function(j,Ae){return void 0===Ae&&(Ae=!1),this.$mobx.observe(j,Ae)},q.prototype.clear=function(){return this.splice(0)},q.prototype.concat=function(){for(var j=[],Ae=0;Ae-1&&(this.splice(Ae,1),!0)},q.prototype.move=function(j,Ae){function St(Br){if(Br<0)throw new Error("[mobx.array] Index out of bounds: "+Br+" is negative");var Ii=this.$mobx.values.length;if(Br>=Ii)throw new Error("[mobx.array] Index out of bounds: "+Br+" is not smaller than "+Ii)}if(St.call(this,j),St.call(this,Ae),j!==Ae){var ur,Kt=this.$mobx.values;ur=j0){if(++q>=Ri)return arguments[0]}else q=0;return L.apply(void 0,arguments)}}(Xn);const wl=Ms,Qa=function Ho(L,q){return wl(je(L,q,Pa),L+"")};var rn=s(15131),Jl=s(2951),le=s(66224);const De=function ae(L,q,j){(void 0!==j&&!(0,le.Z)(L[q],j)||void 0===j&&!(q in L))&&(0,Jl.Z)(L,q,j)};var zt=function Ve(L){return function(q,j,Ae){for(var St=-1,Kt=Object(q),ur=Ae(q),Br=ur.length;Br--;){var Ii=ur[L?Br:++St];if(!1===j(Kt[Ii],Ii,Kt))break}return q}}();const Qt=zt;var Gn=s(27672),Er=s(1044),Nr=s(36889),Mi=s(42542),ao=s(40591),Jo=s(34654),rs=s(18402),ys=s(6539);var 
eu=s(25014),mu=s(58209),wu=s(4214),Rc=s(98286),fu=s(11595),vc=Function.prototype.toString,La=Object.prototype.hasOwnProperty,al=vc.call(Object);const xa=function rl(L){if(!(0,ys.Z)(L)||"[object Object]"!=(0,Rc.Z)(L))return!1;var q=(0,fu.Z)(L);if(null===q)return!0;var j=La.call(q,"constructor")&&q.constructor;return"function"==typeof j&&j instanceof j&&vc.call(j)==al};var Tu=s(14803);const Pu=function En(L,q){if(("constructor"!==q||"function"!=typeof L[q])&&"__proto__"!=q)return L[q]};var za=s(57640),Va=s(34673);const Hc=function ld(L,q,j,Ae,St,Kt,ur){var Br=Pu(L,j),Ii=Pu(q,j),ms=ur.get(Ii);if(ms)De(L,j,ms);else{var vs=Kt?Kt(Br,Ii,j+"",L,q,ur):void 0,Ks=void 0===vs;if(Ks){var Vl=(0,Jo.Z)(Ii),Xu=!Vl&&(0,eu.Z)(Ii),Fu=!Vl&&!Xu&&(0,Tu.Z)(Ii);vs=Ii,Vl||Xu||Fu?(0,Jo.Z)(Br)?vs=Br:function Ps(L){return(0,ys.Z)(L)&&(0,rs.Z)(L)}(Br)?vs=(0,Nr.Z)(Br):Xu?(Ks=!1,vs=(0,Gn.Z)(Ii,!0)):Fu?(Ks=!1,vs=(0,Er.Z)(Ii,!0)):vs=[]:xa(Ii)||(0,ao.Z)(Ii)?(vs=Br,(0,ao.Z)(Br)?vs=function Os(L){return(0,za.Z)(L,(0,Va.Z)(L))}(Br):(!(0,wu.Z)(Br)||(0,mu.Z)(Br))&&(vs=(0,Mi.Z)(Ii))):Ks=!1}Ks&&(ur.set(Ii,vs),St(vs,Ii,Ae,Kt,ur),ur.delete(Ii)),De(L,j,vs)}},ud=function Vu(L,q,j,Ae,St){L!==q&&Qt(q,function(Kt,ur){if(St||(St=new rn.Z),(0,wu.Z)(Kt))Hc(L,q,ur,j,Vu,Ae,St);else{var Br=Ae?Ae(Pu(L,ur),Kt,ur+"",L,q,St):void 0;void 0===Br&&(Br=Kt),De(L,ur,Br)}},Va.Z)},tf=function md(L,q,j,Ae,St,Kt){return(0,wu.Z)(L)&&(0,wu.Z)(q)&&(Kt.set(q,L),ud(L,q,void 0,md,Kt),Kt.delete(q)),L};var Uf=s(28078);const Uc=function Mu(L,q,j){if(!(0,wu.Z)(j))return!1;var Ae=typeof q;return!!("number"==Ae?(0,rs.Z)(j)&&(0,Uf.Z)(q,j.length):"string"==Ae&&q in j)&&(0,le.Z)(j[q],L)};var ip=function Zu(L){return Qa(function(q,j){var Ae=-1,St=j.length,Kt=St>1?j[St-1]:void 0,ur=St>2?j[2]:void 0;for(Kt=L.length>3&&"function"==typeof Kt?(St--,Kt):void 0,ur&&Uc(j[0],j[1],ur)&&(Kt=St<3?void 0:Kt,St=1),q=Object(q);++AeSt?0:St+q),(j=j>St?St:j)<0&&(j+=St),St=q>j?0:j-q>>>0,q>>>=0;for(var Kt=Array(St);++Ae0&&j(Br)?q>1?Nl(Br,q-1,j,Ae,St):(0,Na.Z)(St,Br):Ae||(St[St.length]=Br)}return St},wa=function ac(L){return null!=L&&L.length?Qu(L,1):[]},yc=function nc(L){return wl(je(L,void 0,wa),L+"")};var Gc=s(23359),ee=yc(function(L,q){var j={};if(null==L)return j;var Ae=!1;q=Wu(q,function(Kt){return Kt=N(Kt,L),Ae||(Ae=Kt.length>1),Kt}),(0,za.Z)(L,(0,Gc.Z)(L),j),Ae&&(j=(0,wt.Z)(j,7,ma));for(var St=q.length;St--;)As(j,q[St]);return j});const Ce=ee;const Gi=function Ur(L,q){for(var j=-1,Ae=null==L?0:L.length;++jBr))return!1;var ms=Kt.get(L),vs=Kt.get(q);if(ms&&vs)return ms==q&&vs==L;var Ks=-1,Vl=!0,Xu=2&j?new T_:void 0;for(Kt.set(L,q),Kt.set(q,L);++Ks-1?St[Kt?q[ur]:ur]:void 0}}(function wp(L,q,j){var Ae=null==L?0:L.length;if(!Ae)return-1;var St=null==j?0:wd(j);return St<0&&(St=w_(Ae+St,0)),ih(L,up(q),St)});const pp=sh;const Sf=function Ef(L){return"string"==typeof L||!(0,Jo.Z)(L)&&(0,ys.Z)(L)&&"[object String]"==(0,Rc.Z)(L)};var Vp=s(40309);const ah=function(){return Vp.Z.Date.now()};var qh=Math.max,N_=Math.min;const em=function Qc(L,q,j){var Ae=!0,St=!0;if("function"!=typeof L)throw new TypeError("Expected a function");return(0,wu.Z)(j)&&(Ae="leading"in j?!!j.leading:Ae,St="trailing"in j?!!j.trailing:St),function wh(L,q,j){var Ae,St,Kt,ur,Br,Ii,ms=0,vs=!1,Ks=!1,Vl=!0;if("function"!=typeof L)throw new TypeError("Expected a function");function Xu(Nd){var mp=Ae,wc=St;return Ae=St=void 0,ms=Nd,ur=L.apply(wc,mp)}function af(Nd){var mp=Nd-Ii;return void 0===Ii||mp>=q||mp<0||Ks&&Nd-ms>=Kt}function lf(){var Nd=ah();if(af(Nd))return m_(Nd);Br=setTimeout(lf,function Oc(Nd){var pd=q-(Nd-Ii);return 
Ks?N_(pd,Kt-(Nd-ms)):pd}(Nd))}function m_(Nd){return Br=void 0,Vl&&Ae?Xu(Nd):(Ae=St=void 0,ur)}function Wd(){var Nd=ah(),mp=af(Nd);if(Ae=arguments,St=this,Ii=Nd,mp){if(void 0===Br)return function Fu(Nd){return ms=Nd,Br=setTimeout(lf,q),vs?Xu(Nd):ur}(Ii);if(Ks)return clearTimeout(Br),Br=setTimeout(lf,q),Xu(Ii)}return void 0===Br&&(Br=setTimeout(lf,q)),ur}return q=Rp(q)||0,(0,wu.Z)(j)&&(vs=!!j.leading,Kt=(Ks="maxWait"in j)?qh(Rp(j.maxWait)||0,q):Kt,Vl="trailing"in j?!!j.trailing:Vl),Wd.cancel=function Hh(){void 0!==Br&&clearTimeout(Br),ms=0,Ae=Ii=St=Br=void 0},Wd.flush=function Uh(){return void 0===Br?ur:m_(ah())},Wd}(L,q,{leading:Ae,maxWait:q,trailing:St})},lh=function _p(L){return L!=L},Im=function im(L){return null==L?[]:function nm(L,q){return Wu(q,function(j){return L[j]})}(L,(0,hf.Z)(L))};var vd=Math.max;const ym=function uh(L,q,j,Ae){L=(0,rs.Z)(L)?L:Im(L),j=j&&!Ae?wd(j):0;var St=L.length;return j<0&&(j=vd(St+j,0)),Sf(L)?j<=St&&L.indexOf(q,j)>-1:!!St&&function Ph(L,q,j){return q==q?function F_(L,q,j){for(var Ae=j-1,St=L.length;++Ae-1};var Np=s(15427);const Cd=function ch(L,q,j,Ae){if(!(0,wu.Z)(L))return L;for(var St=-1,Kt=(q=N(q,L)).length,ur=Kt-1,Br=L;null!=Br&&++St{class L{constructor(j,Ae){this.templateRef=j,this.viewContainer=Ae,this.templateBindings={}}ngOnInit(){this.view=this.viewContainer.createEmbeddedView(this.templateRef),this.dispose&&this.dispose(),this.shouldDetach()&&this.view.detach(),this.autoDetect(this.view)}shouldDetach(){return this.treeMobxAutorun&&this.treeMobxAutorun.detach}autoDetect(j){this.dispose=vi(()=>j.detectChanges())}ngOnDestroy(){this.dispose&&this.dispose()}}return L.\u0275fac=function(j){return new(j||L)(r.Y36(r.Rgc),r.Y36(r.s_b))},L.\u0275dir=r.lG2({type:L,selectors:[["","treeMobxAutorun",""]],inputs:{treeMobxAutorun:"treeMobxAutorun"}}),L})();const Tf={TOGGLE_ACTIVE:(L,q,j)=>q&&q.toggleActivated(),TOGGLE_ACTIVE_MULTI:(L,q,j)=>q&&q.toggleActivated(!0),TOGGLE_SELECTED:(L,q,j)=>q&&q.toggleSelected(),ACTIVATE:(L,q,j)=>q.setIsActive(!0),DEACTIVATE:(L,q,j)=>q.setIsActive(!1),SELECT:(L,q,j)=>q.setIsSelected(!0),DESELECT:(L,q,j)=>q.setIsSelected(!1),FOCUS:(L,q,j)=>q.focus(),TOGGLE_EXPANDED:(L,q,j)=>q.hasChildren&&q.toggleExpanded(),EXPAND:(L,q,j)=>q.expand(),COLLAPSE:(L,q,j)=>q.collapse(),DRILL_DOWN:(L,q,j)=>L.focusDrillDown(),DRILL_UP:(L,q,j)=>L.focusDrillUp(),NEXT_NODE:(L,q,j)=>L.focusNextNode(),PREVIOUS_NODE:(L,q,j)=>L.focusPreviousNode(),MOVE_NODE:(L,q,j,{from:Ae,to:St})=>{j.ctrlKey?L.copyNode(Ae,St):L.moveNode(Ae,St)}},fh={mouse:{click:Tf.TOGGLE_ACTIVE,dblClick:null,contextMenu:null,expanderClick:Tf.TOGGLE_EXPANDED,checkboxClick:Tf.TOGGLE_SELECTED,drop:Tf.MOVE_NODE},keys:{39:Tf.DRILL_DOWN,37:Tf.DRILL_UP,40:Tf.NEXT_NODE,38:Tf.PREVIOUS_NODE,32:Tf.TOGGLE_ACTIVE,13:Tf.TOGGLE_ACTIVE}};class sm{constructor(q={}){this.options=q,this.actionMapping=gd({},this.options.actionMapping,fh),q.rtl&&(this.actionMapping.keys[39]=ft(q,["actionMapping","keys",39])||Tf.DRILL_UP,this.actionMapping.keys[37]=ft(q,["actionMapping","keys",37])||Tf.DRILL_DOWN)}get hasChildrenField(){return this.options.hasChildrenField||"hasChildren"}get childrenField(){return this.options.childrenField||"children"}get displayField(){return this.options.displayField||"name"}get idField(){return this.options.idField||"id"}get isExpandedField(){return this.options.isExpandedField||"isExpanded"}get getChildren(){return this.options.getChildren}get levelPadding(){return this.options.levelPadding||0}get useVirtualScroll(){return this.options.useVirtualScroll}get animateExpand(){return 
this.options.animateExpand}get animateSpeed(){return this.options.animateSpeed||1}get animateAcceleration(){return this.options.animateAcceleration||1.2}get scrollOnActivate(){return void 0===this.options.scrollOnActivate||this.options.scrollOnActivate}get rtl(){return!!this.options.rtl}get rootId(){return this.options.rootId}get useCheckbox(){return this.options.useCheckbox}get useTriState(){return void 0===this.options.useTriState||this.options.useTriState}get scrollContainer(){return this.options.scrollContainer}get allowDragoverStyling(){return void 0===this.options.allowDragoverStyling||this.options.allowDragoverStyling}getNodeClone(q){return this.options.getNodeClone?this.options.getNodeClone(q):Ce(Object.assign({},q.data),["id"])}allowDrop(q,j,Ae){return this.options.allowDrop instanceof Function?this.options.allowDrop(q,j,Ae):void 0===this.options.allowDrop||this.options.allowDrop}allowDrag(q){return this.options.allowDrag instanceof Function?this.options.allowDrag(q):this.options.allowDrag}nodeClass(q){return this.options.nodeClass?this.options.nodeClass(q):""}nodeHeight(q){if(q.data.virtual)return 0;let j=this.options.nodeHeight||22;return"function"==typeof j&&(j=j(q)),j+(0===q.index?2:1)*this.dropSlotHeight}get dropSlotHeight(){return function $t(L){return"number"==typeof L||(0,ys.Z)(L)&&"[object Number]"==(0,Rc.Z)(L)}(this.options.dropSlotHeight)?this.options.dropSlotHeight:2}}const nd={toggleExpanded:"toggleExpanded",activate:"activate",deactivate:"deactivate",nodeActivate:"nodeActivate",nodeDeactivate:"nodeDeactivate",select:"select",deselect:"deselect",focus:"focus",blur:"blur",initialized:"initialized",updateData:"updateData",moveNode:"moveNode",copyNode:"copyNode",event:"event",loadNodeChildren:"loadNodeChildren",changeFilter:"changeFilter",stateChange:"stateChange"};var Zd=function(L,q,j,Ae){var ur,St=arguments.length,Kt=St<3?q:null===Ae?Ae=Object.getOwnPropertyDescriptor(q,j):Ae;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)Kt=Reflect.decorate(L,q,j,Ae);else for(var Br=L.length-1;Br>=0;Br--)(ur=L[Br])&&(Kt=(St<3?ur(Kt):St>3?ur(q,j,Kt):ur(q,j))||Kt);return St>3&&Kt&&Object.defineProperty(q,j,Kt),Kt},hc=function(L,q){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(L,q)};let _g=(()=>{class L{constructor(j,Ae,St,Kt){this.data=j,this.parent=Ae,this.treeModel=St,this.position=0,this.allowDrop=(ur,Br)=>this.options.allowDrop(ur,{parent:this,index:0},Br),this.allowDragoverStyling=()=>this.options.allowDragoverStyling,null==this.id&&(this.id=function hg(){return Math.floor(1e13*Math.random())}()),this.index=Kt,this.getField("children")&&this._initChildren(),this.autoLoadChildren()}get isHidden(){return this.treeModel.isHidden(this)}get isExpanded(){return this.treeModel.isExpanded(this)}get isActive(){return this.treeModel.isActive(this)}get isFocused(){return this.treeModel.isNodeFocused(this)}get isSelected(){return this.isSelectable()?this.treeModel.isSelected(this):function Zs(L,q,j){var Ae=(0,Jo.Z)(L)?Gi:Tc;return j&&Uc(L,q,j)&&(q=void 0),Ae(L,up(q))}(this.children,j=>j.isSelected)}get isAllSelected(){return this.isSelectable()?this.treeModel.isSelected(this):function cp(L,q,j){var Ae=(0,Jo.Z)(L)?Cc:yf;return j&&Uc(L,q,j)&&(q=void 0),Ae(L,up(q))}(this.children,j=>j.isAllSelected)}get isPartiallySelected(){return this.isSelected&&!this.isAllSelected}get level(){return this.parent?this.parent.level+1:0}get path(){return this.parent?[...this.parent.path,this.id]:[]}get elementRef(){throw"Element Ref is no longer 
supported since introducing virtual scroll\n\n You may use a template to obtain a reference to the element"}get originalNode(){return this._originalNode}get hasChildren(){return!!(this.getField("hasChildren")||this.children&&this.children.length>0)}get isCollapsed(){return!this.isExpanded}get isLeaf(){return!this.hasChildren}get isRoot(){return this.parent.data.virtual}get realParent(){return this.isRoot?null:this.parent}get options(){return this.treeModel.options}fireEvent(j){this.treeModel.fireEvent(j)}get displayField(){return this.getField("display")}get id(){return this.getField("id")}set id(j){this.setField("id",j)}getField(j){return this.data[this.options[`${j}Field`]]}setField(j,Ae){this.data[this.options[`${j}Field`]]=Ae}_findAdjacentSibling(j,Ae=!1){const St=this._getParentsChildren(Ae),Kt=St.indexOf(this);return St.length>Kt+j?St[Kt+j]:null}findNextSibling(j=!1){return this._findAdjacentSibling(1,j)}findPreviousSibling(j=!1){return this._findAdjacentSibling(-1,j)}getVisibleChildren(){return this.visibleChildren}get visibleChildren(){return(this.children||[]).filter(j=>!j.isHidden)}getFirstChild(j=!1){return __((j?this.visibleChildren:this.children)||[])}getLastChild(j=!1){return Cn((j?this.visibleChildren:this.children)||[])}findNextNode(j=!0,Ae=!1){return j&&this.isExpanded&&this.getFirstChild(Ae)||this.findNextSibling(Ae)||this.parent&&this.parent.findNextNode(!1,Ae)}findPreviousNode(j=!1){let Ae=this.findPreviousSibling(j);return Ae?Ae._getLastOpenDescendant(j):this.realParent}_getLastOpenDescendant(j=!1){const Ae=this.getLastChild(j);return this.isCollapsed||!Ae?this:Ae._getLastOpenDescendant(j)}_getParentsChildren(j=!1){return this.parent&&(j?this.parent.getVisibleChildren():this.parent.children)||[]}getIndexInParent(j=!1){return this._getParentsChildren(j).indexOf(this)}isDescendantOf(j){return this===j||this.parent&&this.parent.isDescendantOf(j)}getNodePadding(){return this.options.levelPadding*(this.level-1)+"px"}getClass(){return[this.options.nodeClass(this),`tree-node-level-${this.level}`].join(" ")}onDrop(j){this.mouseAction("drop",j.event,{from:j.element,to:{parent:this,index:0,dropOnNode:!0}})}allowDrag(){return this.options.allowDrag(this)}loadNodeChildren(){return this.options.getChildren?Promise.resolve(this.options.getChildren(this)).then(j=>{j&&(this.setField("children",j),this._initChildren(),this.options.useTriState&&this.treeModel.isSelected(this)&&this.setIsSelected(!0),this.children.forEach(Ae=>{Ae.getField("isExpanded")&&Ae.hasChildren&&Ae.expand()}))}).then(()=>{this.fireEvent({eventName:nd.loadNodeChildren,node:this})}):Promise.resolve()}expand(){return this.isExpanded||this.toggleExpanded(),this}collapse(){return this.isExpanded&&this.toggleExpanded(),this}doForAll(j){Promise.resolve(j(this)).then(()=>{this.children&&this.children.forEach(Ae=>Ae.doForAll(j))})}expandAll(){this.doForAll(j=>j.expand())}collapseAll(){this.doForAll(j=>j.collapse())}ensureVisible(){return this.realParent&&(this.realParent.expand(),this.realParent.ensureVisible()),this}toggleExpanded(){return this.setIsExpanded(!this.isExpanded),this}setIsExpanded(j){return this.hasChildren&&this.treeModel.setExpandedNode(this,j),this}autoLoadChildren(){this.handler=ws(()=>this.isExpanded,j=>{!this.children&&this.hasChildren&&j&&this.loadNodeChildren()},{fireImmediately:!0})}dispose(){this.children&&this.children.forEach(j=>j.dispose()),this.handler&&this.handler(),this.parent=null,this.children=null}setIsActive(j,Ae=!1){return 
this.treeModel.setActiveNode(this,j,Ae),j&&this.focus(this.options.scrollOnActivate),this}isSelectable(){return this.isLeaf||!this.children||!this.options.useTriState}setIsSelected(j){return this.isSelectable()?this.treeModel.setSelectedNode(this,j):this.visibleChildren.forEach(Ae=>Ae.setIsSelected(j)),this}toggleSelected(){return this.setIsSelected(!this.isSelected),this}toggleActivated(j=!1){return this.setIsActive(!this.isActive,j),this}setActiveAndVisible(j=!1){return this.setIsActive(!0,j).ensureVisible(),setTimeout(this.scrollIntoView.bind(this)),this}scrollIntoView(j=!1){this.treeModel.virtualScroll.scrollIntoView(this,j)}focus(j=!0){let Ae=this.treeModel.getFocusedNode();return this.treeModel.setFocusedNode(this),j&&this.scrollIntoView(),Ae&&this.fireEvent({eventName:nd.blur,node:Ae}),this.fireEvent({eventName:nd.focus,node:this}),this}blur(){let j=this.treeModel.getFocusedNode();return this.treeModel.setFocusedNode(null),j&&this.fireEvent({eventName:nd.blur,node:this}),this}setIsHidden(j){this.treeModel.setIsHidden(this,j)}hide(){this.setIsHidden(!0)}show(){this.setIsHidden(!1)}mouseAction(j,Ae,St=null){this.treeModel.setFocus(!0);const ur=this.options.actionMapping.mouse[j];ur&&ur(this.treeModel,this,Ae,St)}getSelfHeight(){return this.options.nodeHeight(this)}_initChildren(){this.children=this.getField("children").map((j,Ae)=>new L(j,this,this.treeModel,Ae))}}return Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isHidden",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isExpanded",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isActive",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isFocused",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isSelected",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isAllSelected",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"isPartiallySelected",null),Zd([jt,hc("design:type",Array)],L.prototype,"children",void 0),Zd([jt,hc("design:type",Number)],L.prototype,"index",void 0),Zd([jt,hc("design:type",Object)],L.prototype,"position",void 0),Zd([jt,hc("design:type",Number)],L.prototype,"height",void 0),Zd([ze,hc("design:type",Number),hc("design:paramtypes",[])],L.prototype,"level",null),Zd([ze,hc("design:type",Array),hc("design:paramtypes",[])],L.prototype,"path",null),Zd([ze,hc("design:type",Object),hc("design:paramtypes",[])],L.prototype,"visibleChildren",null),Zd([ie,hc("design:type",Function),hc("design:paramtypes",[Object]),hc("design:returntype",void 0)],L.prototype,"setIsSelected",null),Zd([ie,hc("design:type",Function),hc("design:paramtypes",[]),hc("design:returntype",void 0)],L.prototype,"_initChildren",null),L})();var Iu=function(L,q,j,Ae){var ur,St=arguments.length,Kt=St<3?q:null===Ae?Ae=Object.getOwnPropertyDescriptor(q,j):Ae;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)Kt=Reflect.decorate(L,q,j,Ae);else for(var Br=L.length-1;Br>=0;Br--)(ur=L[Br])&&(Kt=(St<3?ur(Kt):St>3?ur(q,j,Kt):ur(q,j))||Kt);return St>3&&Kt&&Object.defineProperty(q,j,Kt),Kt},Es=function(L,q){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(L,q)};let gu=(()=>{class L{constructor(){this.options=new 
sm,this.eventNames=Object.keys(nd),this.expandedNodeIds={},this.selectedLeafNodeIds={},this.activeNodeIds={},this.hiddenNodeIds={},this.focusedNodeId=null,this.firstUpdate=!0,this.subscriptions=[]}fireEvent(j){j.treeModel=this,this.events[j.eventName].emit(j),this.events.event.emit(j)}subscribe(j,Ae){const St=this.events[j].subscribe(Ae);this.subscriptions.push(St)}getFocusedNode(){return this.focusedNode}getActiveNode(){return this.activeNodes[0]}getActiveNodes(){return this.activeNodes}getVisibleRoots(){return this.virtualRoot.visibleChildren}getFirstRoot(j=!1){return __(j?this.getVisibleRoots():this.roots)}getLastRoot(j=!1){return Cn(j?this.getVisibleRoots():this.roots)}get isFocused(){return L.focusedTree===this}isNodeFocused(j){return this.focusedNode===j}isEmptyTree(){return this.roots&&0===this.roots.length}get focusedNode(){return this.focusedNodeId?this.getNodeById(this.focusedNodeId):null}get expandedNodes(){const j=Object.keys(this.expandedNodeIds).filter(Ae=>this.expandedNodeIds[Ae]).map(Ae=>this.getNodeById(Ae));return zf(j)}get activeNodes(){const j=Object.keys(this.activeNodeIds).filter(Ae=>this.activeNodeIds[Ae]).map(Ae=>this.getNodeById(Ae));return zf(j)}get hiddenNodes(){const j=Object.keys(this.hiddenNodeIds).filter(Ae=>this.hiddenNodeIds[Ae]).map(Ae=>this.getNodeById(Ae));return zf(j)}get selectedLeafNodes(){const j=Object.keys(this.selectedLeafNodeIds).filter(Ae=>this.selectedLeafNodeIds[Ae]).map(Ae=>this.getNodeById(Ae));return zf(j)}getNodeByPath(j,Ae=null){if(!j)return null;if(Ae=Ae||this.virtualRoot,0===j.length)return Ae;if(!Ae.children)return null;const St=j.shift(),Kt=pp(Ae.children,{id:St});return Kt?this.getNodeByPath(j,Kt):null}getNodeById(j){const Ae=j.toString();return this.getNodeBy(St=>St.id.toString()===Ae)}getNodeBy(j,Ae=null){if(!(Ae=Ae||this.virtualRoot).children)return null;const St=pp(Ae.children,j);if(St)return St;for(let Kt of Ae.children){const ur=this.getNodeBy(j,Kt);if(ur)return ur}}isExpanded(j){return this.expandedNodeIds[j.id]}isHidden(j){return this.hiddenNodeIds[j.id]}isActive(j){return this.activeNodeIds[j.id]}isSelected(j){return this.selectedLeafNodeIds[j.id]}ngOnDestroy(){this.dispose(),this.unsubscribeAll()}dispose(){this.virtualRoot&&this.virtualRoot.dispose()}unsubscribeAll(){this.subscriptions.forEach(j=>j.unsubscribe()),this.subscriptions=[]}setData({nodes:j,options:Ae=null,events:St=null}){Ae&&(this.options=new sm(Ae)),St&&(this.events=St),j&&(this.nodes=j),this.update()}update(){let j={id:this.options.rootId,virtual:!0,[this.options.childrenField]:this.nodes};this.dispose(),this.virtualRoot=new _g(j,null,this,0),this.roots=this.virtualRoot.children,this.firstUpdate?this.roots&&(this.firstUpdate=!1,this._calculateExpandedNodes()):this.fireEvent({eventName:nd.updateData})}setFocusedNode(j){this.focusedNodeId=j?j.id:null}setFocus(j){L.focusedTree=j?this:null}doForAll(j){this.roots.forEach(Ae=>Ae.doForAll(j))}focusNextNode(){let j=this.getFocusedNode(),Ae=j?j.findNextNode(!0,!0):this.getFirstRoot(!0);Ae&&Ae.focus()}focusPreviousNode(){let j=this.getFocusedNode(),Ae=j?j.findPreviousNode(!0):this.getLastRoot(!0);Ae&&Ae.focus()}focusDrillDown(){let j=this.getFocusedNode();if(j&&j.isCollapsed&&j.hasChildren)j.toggleExpanded();else{let Ae=j?j.getFirstChild(!0):this.getFirstRoot(!0);Ae&&Ae.focus()}}focusDrillUp(){let j=this.getFocusedNode();if(j)if(j.isExpanded)j.toggleExpanded();else{let 
Ae=j.realParent;Ae&&Ae.focus()}}setActiveNode(j,Ae,St=!1){St?this._setActiveNodeMulti(j,Ae):this._setActiveNodeSingle(j,Ae),Ae?(j.focus(this.options.scrollOnActivate),this.fireEvent({eventName:nd.activate,node:j}),this.fireEvent({eventName:nd.nodeActivate,node:j})):(this.fireEvent({eventName:nd.deactivate,node:j}),this.fireEvent({eventName:nd.nodeDeactivate,node:j}))}setSelectedNode(j,Ae){this.selectedLeafNodeIds=Object.assign({},this.selectedLeafNodeIds,{[j.id]:Ae}),Ae?(j.focus(),this.fireEvent({eventName:nd.select,node:j})):this.fireEvent({eventName:nd.deselect,node:j})}setExpandedNode(j,Ae){this.expandedNodeIds=Object.assign({},this.expandedNodeIds,{[j.id]:Ae}),this.fireEvent({eventName:nd.toggleExpanded,node:j,isExpanded:Ae})}expandAll(){this.roots.forEach(j=>j.expandAll())}collapseAll(){this.roots.forEach(j=>j.collapseAll())}setIsHidden(j,Ae){this.hiddenNodeIds=Object.assign({},this.hiddenNodeIds,{[j.id]:Ae})}setHiddenNodeIds(j){this.hiddenNodeIds=j.reduce((Ae,St)=>Object.assign(Ae,{[St]:!0}),{})}performKeyAction(j,Ae){const St=this.options.actionMapping.keys[Ae.keyCode];return!!St&&(Ae.preventDefault(),St(this,j,Ae),!0)}filterNodes(j,Ae=!0){let St;if(!j)return this.clearFilter();if(Sf(j))St=ur=>-1!==ur.displayField.toLowerCase().indexOf(j.toLowerCase());else{if(!(0,mu.Z)(j))return console.error("Don't know what to do with filter",j),void console.error("Should be either a string or function");St=j}const Kt={};this.roots.forEach(ur=>this._filterNode(Kt,ur,St,Ae)),this.hiddenNodeIds=Kt,this.fireEvent({eventName:nd.changeFilter})}clearFilter(){this.hiddenNodeIds={},this.fireEvent({eventName:nd.changeFilter})}moveNode(j,Ae){const St=j.getIndexInParent(),Kt=j.parent;if(!this.canMoveNode(j,Ae,St))return;const ur=Kt.getField("children");Ae.parent.getField("children")||Ae.parent.setField("children",[]);const Br=Ae.parent.getField("children"),Ii=ur.splice(St,1)[0];let ms=Kt===Ae.parent&&Ae.index>St?Ae.index-1:Ae.index;Br.splice(ms,0,Ii),Kt.treeModel.update(),Ae.parent.treeModel!==Kt.treeModel&&Ae.parent.treeModel.update(),this.fireEvent({eventName:nd.moveNode,node:Ii,to:{parent:Ae.parent.data,index:ms},from:{parent:Kt.data,index:St}})}copyNode(j,Ae){const St=j.getIndexInParent();if(!this.canMoveNode(j,Ae,St))return;Ae.parent.getField("children")||Ae.parent.setField("children",[]);const Kt=Ae.parent.getField("children"),ur=this.options.getNodeClone(j);Kt.splice(Ae.index,0,ur),j.treeModel.update(),Ae.parent.treeModel!==j.treeModel&&Ae.parent.treeModel.update(),this.fireEvent({eventName:nd.copyNode,node:ur,to:{parent:Ae.parent.data,index:Ae.index}})}getState(){return{expandedNodeIds:this.expandedNodeIds,selectedLeafNodeIds:this.selectedLeafNodeIds,activeNodeIds:this.activeNodeIds,hiddenNodeIds:this.hiddenNodeIds,focusedNodeId:this.focusedNodeId}}setState(j){j&&Object.assign(this,{expandedNodeIds:j.expandedNodeIds||{},selectedLeafNodeIds:j.selectedLeafNodeIds||{},activeNodeIds:j.activeNodeIds||{},hiddenNodeIds:j.hiddenNodeIds||{},focusedNodeId:j.focusedNodeId})}subscribeToState(j){vi(()=>j(this.getState()))}canMoveNode(j,Ae,St){return St||j.getIndexInParent(),(j.parent!==Ae.parent||St!==Ae.index)&&!Ae.parent.isDescendantOf(j)}calculateExpandedNodes(){this._calculateExpandedNodes()}_filterNode(j,Ae,St,Kt){let ur=St(Ae);return 
Ae.children&&Ae.children.forEach(Br=>{this._filterNode(j,Br,St,Kt)&&(ur=!0)}),ur||(j[Ae.id]=!0),Kt&&ur&&Ae.ensureVisible(),ur}_calculateExpandedNodes(j=null){(j=j||this.virtualRoot).data[this.options.isExpandedField]&&(this.expandedNodeIds=Object.assign({},this.expandedNodeIds,{[j.id]:!0})),j.children&&j.children.forEach(Ae=>this._calculateExpandedNodes(Ae))}_setActiveNodeSingle(j,Ae){this.activeNodes.filter(St=>St!==j).forEach(St=>{this.fireEvent({eventName:nd.deactivate,node:St}),this.fireEvent({eventName:nd.nodeDeactivate,node:St})}),this.activeNodeIds=Ae?{[j.id]:!0}:{}}_setActiveNodeMulti(j,Ae){this.activeNodeIds=Object.assign({},this.activeNodeIds,{[j.id]:Ae})}}return L.\u0275fac=function(j){return new(j||L)},L.\u0275prov=r.Yz7({token:L,factory:L.\u0275fac}),L.focusedTree=null,L})();Iu([jt,Es("design:type",Array)],gu.prototype,"roots",void 0),Iu([jt,Es("design:type",Object)],gu.prototype,"expandedNodeIds",void 0),Iu([jt,Es("design:type",Object)],gu.prototype,"selectedLeafNodeIds",void 0),Iu([jt,Es("design:type",Object)],gu.prototype,"activeNodeIds",void 0),Iu([jt,Es("design:type",Object)],gu.prototype,"hiddenNodeIds",void 0),Iu([jt,Es("design:type",Object)],gu.prototype,"focusedNodeId",void 0),Iu([jt,Es("design:type",_g)],gu.prototype,"virtualRoot",void 0),Iu([ze,Es("design:type",Object),Es("design:paramtypes",[])],gu.prototype,"focusedNode",null),Iu([ze,Es("design:type",Object),Es("design:paramtypes",[])],gu.prototype,"expandedNodes",null),Iu([ze,Es("design:type",Object),Es("design:paramtypes",[])],gu.prototype,"activeNodes",null),Iu([ze,Es("design:type",Object),Es("design:paramtypes",[])],gu.prototype,"hiddenNodes",null),Iu([ze,Es("design:type",Object),Es("design:paramtypes",[])],gu.prototype,"selectedLeafNodes",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"setData",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"update",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"setFocusedNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"setFocus",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"doForAll",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"focusNextNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"focusPreviousNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"focusDrillDown",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"focusDrillUp",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object,Object]),Es("design:returntype",void 0)],gu.prototype,"setActiveNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"setSelectedNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"setExpandedNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 
0)],gu.prototype,"expandAll",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"collapseAll",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"setIsHidden",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"setHiddenNodeIds",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"filterNodes",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[]),Es("design:returntype",void 0)],gu.prototype,"clearFilter",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"moveNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object,Object]),Es("design:returntype",void 0)],gu.prototype,"copyNode",null),Iu([ie,Es("design:type",Function),Es("design:paramtypes",[Object]),Es("design:returntype",void 0)],gu.prototype,"setState",null);let km=(()=>{class L{constructor(){this._draggedElement=null}set(j){this._draggedElement=j}get(){return this._draggedElement}isDragging(){return!!this.get()}}return L.\u0275fac=function(j){return new(j||L)},L.\u0275prov=(0,r.Yz7)({factory:function(){return new L},token:L,providedIn:"root"}),L})();var k_=function(L,q,j,Ae){var ur,St=arguments.length,Kt=St<3?q:null===Ae?Ae=Object.getOwnPropertyDescriptor(q,j):Ae;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)Kt=Reflect.decorate(L,q,j,Ae);else for(var Br=L.length-1;Br>=0;Br--)(ur=L[Br])&&(Kt=(St<3?ur(Kt):St>3?ur(q,j,Kt):ur(q,j))||Kt);return St>3&&Kt&&Object.defineProperty(q,j,Kt),Kt},Pd=function(L,q){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(L,q)};let Fp=(()=>{class L{constructor(j){this.treeModel=j,this.yBlocks=0,this.x=0,this.viewportHeight=null,this.viewport=null,j.virtualScroll=this,this._dispose=[vi(()=>this.fixScroll())]}get y(){return 150*this.yBlocks}get totalHeight(){return this.treeModel.virtualRoot?this.treeModel.virtualRoot.height:0}fireEvent(j){this.treeModel.fireEvent(j)}init(){const j=this.recalcPositions.bind(this);j(),this._dispose=[...this._dispose,ws(()=>this.treeModel.roots,j),ws(()=>this.treeModel.expandedNodeIds,j),ws(()=>this.treeModel.hiddenNodeIds,j)],this.treeModel.subscribe(nd.loadNodeChildren,j)}isEnabled(){return this.treeModel.options.useVirtualScroll}_setYBlocks(j){this.yBlocks=j}recalcPositions(){this.treeModel.virtualRoot.height=this._getPositionAfter(this.treeModel.getVisibleRoots(),0)}_getPositionAfter(j,Ae){let St=Ae;return j.forEach(Kt=>{Kt.position=St,St=this._getPositionAfterNode(Kt,St)}),St}_getPositionAfterNode(j,Ae){let St=j.getSelfHeight()+Ae;return j.children&&j.isExpanded&&(St=this._getPositionAfter(j.visibleChildren,St)),j.height=St-Ae,St}clear(){this._dispose.forEach(j=>j())}setViewport(j){Object.assign(this,{viewport:j,x:j.scrollLeft,yBlocks:Math.round(j.scrollTop/150),viewportHeight:j.getBoundingClientRect?j.getBoundingClientRect().height:0})}scrollIntoView(j,Ae,St=!0){if(j.options.scrollContainer){const 
Kt=j.options.scrollContainer,ur=Kt.getBoundingClientRect().height,Br=Kt.getBoundingClientRect().top,Ii=this.viewport.getBoundingClientRect().top+j.position-Br;(Ae||IiKt.scrollTop+ur)&&(Kt.scrollTop=St?Ii-ur/2:Ii)}else(Ae||j.positionthis.y+this.viewportHeight)&&this.viewport&&(this.viewport.scrollTop=St?j.position-this.viewportHeight/2:j.position,this._setYBlocks(Math.floor(this.viewport.scrollTop/150)))}getViewportNodes(j){if(!j)return[];const Ae=j.filter(ms=>!ms.isHidden);if(!this.isEnabled())return Ae;if(!this.viewportHeight||!Ae.length)return[];const St=Lg(Ae,ms=>ms.position+500>this.y||ms.position+ms.height>this.y),Kt=Lg(Ae,ms=>ms.position-500>this.y+this.viewportHeight,St),ur=[];if(Kt-St>(1e3+this.viewportHeight)/Ae[0].treeModel.options.options.nodeHeight)return[];for(let ms=St;ms<=Kt;ms++)ur.push(Ae[ms]);return ur}fixScroll(){const j=Math.max(0,this.totalHeight-this.viewportHeight);this.y<0&&this._setYBlocks(0),this.y>j&&this._setYBlocks(j/150)}}return L.\u0275fac=function(j){return new(j||L)(r.LFG(gu))},L.\u0275prov=r.Yz7({token:L,factory:L.\u0275fac}),L})();function Lg(L,q,j=0){let Ae=j,St=L.length-1;for(;Ae!==St;){let Kt=Math.floor((Ae+St)/2);q(L[Kt])?St=Kt:Ae=Ae===Kt?St:Kt}return Ae}k_([jt,Pd("design:type",Object)],Fp.prototype,"yBlocks",void 0),k_([jt,Pd("design:type",Object)],Fp.prototype,"x",void 0),k_([jt,Pd("design:type",Object)],Fp.prototype,"viewportHeight",void 0),k_([ze,Pd("design:type",Object),Pd("design:paramtypes",[])],Fp.prototype,"y",null),k_([ze,Pd("design:type",Object),Pd("design:paramtypes",[])],Fp.prototype,"totalHeight",null),k_([ie,Pd("design:type",Function),Pd("design:paramtypes",[Object]),Pd("design:returntype",void 0)],Fp.prototype,"_setYBlocks",null),k_([ie,Pd("design:type",Function),Pd("design:paramtypes",[]),Pd("design:returntype",void 0)],Fp.prototype,"recalcPositions",null),k_([ie,Pd("design:type",Function),Pd("design:paramtypes",[Object]),Pd("design:returntype",void 0)],Fp.prototype,"setViewport",null),k_([ie,Pd("design:type",Function),Pd("design:paramtypes",[Object,Object,Object]),Pd("design:returntype",void 0)],Fp.prototype,"scrollIntoView",null);let S1=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-loading-component"]],inputs:{template:"template",node:"node"},decls:2,vars:5,consts:[[4,"ngIf"],[3,"ngTemplateOutlet","ngTemplateOutletContext"]],template:function(j,Ae){1&j&&(r.YNc(0,I,2,0,"span",0),r.GkF(1,1)),2&j&&(r.Q6J("ngIf",!Ae.template),r.xp6(1),r.Q6J("ngTemplateOutlet",Ae.template)("ngTemplateOutletContext",r.VKq(3,re,Ae.node)))},dependencies:[a.O5,a.tP],encapsulation:2}),L})(),Hm=(()=>{class L{constructor(j,Ae,St){this.elementRef=j,this.ngZone=Ae,this.virtualScroll=St,this.setViewport=em(()=>{this.virtualScroll.setViewport(this.elementRef.nativeElement)},17),this.scrollEventHandler=this.setViewport.bind(this)}ngOnInit(){this.virtualScroll.init()}ngAfterViewInit(){setTimeout(()=>{this.setViewport(),this.virtualScroll.fireEvent({eventName:nd.initialized})});let j=this.elementRef.nativeElement;this.ngZone.runOutsideAngular(()=>{j.addEventListener("scroll",this.scrollEventHandler)})}ngOnDestroy(){this.virtualScroll.clear(),this.elementRef.nativeElement.removeEventListener("scroll",this.scrollEventHandler)}getTotalHeight(){return this.virtualScroll.isEnabled()&&this.virtualScroll.totalHeight+"px"||"auto"}}return L.\u0275fac=function(j){return 
new(j||L)(r.Y36(r.SBq),r.Y36(r.R0b),r.Y36(Fp))},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-viewport"]],features:[r._Bn([Fp])],ngContentSelectors:Oe,decls:1,vars:2,consts:[[4,"treeMobxAutorun"]],template:function(j,Ae){1&j&&(r.F$t(),r.YNc(0,S,3,2,"ng-container",0)),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:[si],encapsulation:2}),L})(),b1=(()=>{class L{constructor(j,Ae){this.treeModel=j,this.treeDraggedElement=Ae,j.eventNames.forEach(St=>this[St]=new r.vpe),j.subscribeToState(St=>this.stateChange.emit(St))}set nodes(j){}set options(j){}set focused(j){this.treeModel.setFocus(j)}set state(j){this.treeModel.setState(j)}onKeydown(j){if(!this.treeModel.isFocused||ym(["input","textarea"],document.activeElement.tagName.toLowerCase()))return;const Ae=this.treeModel.getFocusedNode();this.treeModel.performKeyAction(Ae,j)}onMousedown(j){(function Ae(St,Kt){return!St||St.localName!==Kt&&Ae(St.parentElement,Kt)})(j.target,"tree-root")&&this.treeModel.setFocus(!1)}ngOnChanges(j){(j.options||j.nodes)&&this.treeModel.setData({options:j.options&&j.options.currentValue,nodes:j.nodes&&j.nodes.currentValue,events:L_(this,this.treeModel.eventNames)})}sizeChanged(){this.viewportComponent.setViewport()}}return L.\u0275fac=function(j){return new(j||L)(r.Y36(gu),r.Y36(km))},L.\u0275cmp=r.Xpm({type:L,selectors:[["Tree"],["tree-root"]],contentQueries:function(j,Ae,St){if(1&j&&(r.Suo(St,ut,5),r.Suo(St,On,5),r.Suo(St,Ar,5),r.Suo(St,ri,5)),2&j){let Kt;r.iGM(Kt=r.CRH())&&(Ae.loadingTemplate=Kt.first),r.iGM(Kt=r.CRH())&&(Ae.treeNodeTemplate=Kt.first),r.iGM(Kt=r.CRH())&&(Ae.treeNodeWrapperTemplate=Kt.first),r.iGM(Kt=r.CRH())&&(Ae.treeNodeFullTemplate=Kt.first)}},viewQuery:function(j,Ae){if(1&j&&r.Gf(Di,5),2&j){let St;r.iGM(St=r.CRH())&&(Ae.viewportComponent=St.first)}},hostBindings:function(j,Ae){1&j&&r.NdJ("keydown",function(Kt){return Ae.onKeydown(Kt)},!1,r.pYS)("mousedown",function(Kt){return Ae.onMousedown(Kt)},!1,r.pYS)},inputs:{nodes:"nodes",options:"options",focused:"focused",state:"state"},outputs:{toggleExpanded:"toggleExpanded",activate:"activate",deactivate:"deactivate",nodeActivate:"nodeActivate",nodeDeactivate:"nodeDeactivate",select:"select",deselect:"deselect",focus:"focus",blur:"blur",updateData:"updateData",initialized:"initialized",moveNode:"moveNode",copyNode:"copyNode",loadNodeChildren:"loadNodeChildren",changeFilter:"changeFilter",event:"event",stateChange:"stateChange"},features:[r._Bn([gu]),r.TTD],decls:5,vars:6,consts:[["viewport",""],[1,"angular-tree-component"],[3,"nodes","treeModel","templates",4,"ngIf"],["class","empty-tree-drop-slot",3,"dropIndex","node",4,"ngIf"],[3,"nodes","treeModel","templates"],[1,"empty-tree-drop-slot",3,"dropIndex","node"]],template:function(j,Ae){1&j&&(r.TgZ(0,"tree-viewport",null,0)(2,"div",1),r.YNc(3,cs,1,8,"tree-node-collection",2),r.YNc(4,Yo,1,2,"tree-node-drop-slot",3),r.qZA()()),2&j&&(r.xp6(2),r.ekj("node-dragging",Ae.treeDraggedElement.isDragging())("angular-tree-component-rtl",Ae.treeModel.options.rtl),r.xp6(1),r.Q6J("ngIf",Ae.treeModel.roots),r.xp6(1),r.Q6J("ngIf",Ae.treeModel.isEmptyTree()))},dependencies:function(){return[a.O5,Em,lm,Hm]},encapsulation:2}),L})(),mg=(()=>{class L{}return L.\u0275fac=function(j){return 
new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["TreeNode"],["tree-node"]],inputs:{node:"node",index:"index",templates:"templates"},decls:1,vars:2,consts:[[4,"treeMobxAutorun"],[3,"class","tree-node","tree-node-expanded","tree-node-collapsed","tree-node-leaf","tree-node-active","tree-node-focused",4,"ngIf"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[3,"dropIndex","node",4,"ngIf"],[3,"node","index","templates"],[3,"node","templates"],[3,"dropIndex","node"]],template:function(j,Ae){1&j&&r.YNc(0,be,3,8,"ng-container",0),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:function(){return[a.O5,a.tP,Il,Em,yg,si]},encapsulation:2}),L})(),kg=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-content"]],inputs:{node:"node",index:"index",template:"template"},decls:2,vars:7,consts:[[4,"ngIf"],[3,"ngTemplateOutlet","ngTemplateOutletContext"]],template:function(j,Ae){1&j&&(r.YNc(0,Ke,2,1,"span",0),r.GkF(1,1)),2&j&&(r.Q6J("ngIf",!Ae.template),r.xp6(1),r.Q6J("ngTemplateOutlet",Ae.template)("ngTemplateOutletContext",r.kEZ(3,xt,Ae.node,Ae.node,Ae.index)))},dependencies:[a.O5,a.tP],encapsulation:2}),L})(),Em=(()=>{class L{onDrop(j){this.node.mouseAction("drop",j.event,{from:j.element,to:{parent:this.node,index:this.dropIndex}})}allowDrop(j,Ae){return this.node.options.allowDrop(j,{parent:this.node,index:this.dropIndex},Ae)}}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["TreeNodeDropSlot"],["tree-node-drop-slot"]],inputs:{node:"node",dropIndex:"dropIndex"},decls:1,vars:2,consts:[[1,"node-drop-slot",3,"treeAllowDrop","allowDragoverStyling","treeDrop"]],template:function(j,Ae){1&j&&(r.TgZ(0,"div",0),r.NdJ("treeDrop",function(Kt){return Ae.onDrop(Kt)}),r.qZA()),2&j&&r.Q6J("treeAllowDrop",Ae.allowDrop.bind(Ae))("allowDragoverStyling",!0)},dependencies:function(){return[_h]},encapsulation:2}),L})(),$g=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-expander"]],inputs:{node:"node"},decls:1,vars:2,consts:[[4,"treeMobxAutorun"],["class","toggle-children-wrapper",3,"toggle-children-wrapper-expanded","toggle-children-wrapper-collapsed","click",4,"ngIf"],["class","toggle-children-placeholder",4,"ngIf"],[1,"toggle-children-wrapper",3,"click"],[1,"toggle-children"],[1,"toggle-children-placeholder"]],template:function(j,Ae){1&j&&r.YNc(0,vr,3,2,"ng-container",0),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:[a.O5,si],encapsulation:2}),L})(),Il=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-children"]],inputs:{node:"node",templates:"templates"},decls:1,vars:2,consts:[[4,"treeMobxAutorun"],[3,"tree-children","tree-children-no-padding",4,"treeAnimateOpen","treeAnimateOpenSpeed","treeAnimateOpenAcceleration","treeAnimateOpenEnabled"],[3,"nodes","templates","treeModel",4,"ngIf"],["class","tree-node-loading",3,"padding-left","template","node",4,"ngIf"],[3,"nodes","templates","treeModel"],[1,"tree-node-loading",3,"template","node"]],template:function(j,Ae){1&j&&r.YNc(0,Qi,2,4,"ng-container",0),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:function(){return[a.O5,S1,lm,O1,si]},encapsulation:2}),L})();const vg=Object.assign(function gg(...L){return ie(...L)},ie),T1=Object.assign(function Hg(...L){return ze(...L)},ze),am=Object.assign(function C1(...L){return jt(...L)},jt);var $h=function(L,q,j,Ae){var 
ur,St=arguments.length,Kt=St<3?q:null===Ae?Ae=Object.getOwnPropertyDescriptor(q,j):Ae;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)Kt=Reflect.decorate(L,q,j,Ae);else for(var Br=L.length-1;Br>=0;Br--)(ur=L[Br])&&(Kt=(St<3?ur(Kt):St>3?ur(q,j,Kt):ur(q,j))||Kt);return St>3&&Kt&&Object.defineProperty(q,j,Kt),Kt},ph=function(L,q){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(L,q)};let lm=(()=>{class L{constructor(){this._dispose=[]}get nodes(){return this._nodes}set nodes(j){this.setNodes(j)}get marginTop(){const j=this.viewportNodes&&this.viewportNodes.length&&this.viewportNodes[0];return(j&&j.parent?j.position-j.parent.position-j.parent.getSelfHeight():0)+"px"}setNodes(j){this._nodes=j}ngOnInit(){this.virtualScroll=this.treeModel.virtualScroll,this._dispose=[ws(()=>this.virtualScroll.getViewportNodes(this.nodes).map(j=>j.index),j=>{this.viewportNodes=j.map(Ae=>this.nodes[Ae])},{compareStructural:!0,fireImmediately:!0}),ws(()=>this.nodes,j=>{this.viewportNodes=this.virtualScroll.getViewportNodes(j)})]}ngOnDestroy(){this._dispose.forEach(j=>j())}trackNode(j,Ae){return Ae.id}}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-collection"]],inputs:{nodes:"nodes",treeModel:"treeModel",templates:"templates"},decls:1,vars:2,consts:[[4,"treeMobxAutorun"],[3,"node","index","templates",4,"ngFor","ngForOf","ngForTrackBy"],[3,"node","index","templates"]],template:function(j,Ae){1&j&&r.YNc(0,ia,3,4,"ng-container",0),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:[a.sg,mg,si],encapsulation:2}),L})();$h([am,ph("design:type",Object)],lm.prototype,"_nodes",void 0),$h([am,ph("design:type",Array)],lm.prototype,"viewportNodes",void 0),$h([T1,ph("design:type",String),ph("design:paramtypes",[])],lm.prototype,"marginTop",null),$h([vg,ph("design:type",Function),ph("design:paramtypes",[Object]),ph("design:returntype",void 0)],lm.prototype,"setNodes",null);let yg=(()=>{class L{constructor(){}}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-wrapper"]],inputs:{node:"node",index:"index",templates:"templates"},decls:2,vars:8,consts:[["class","node-wrapper",3,"padding-left",4,"ngIf"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[1,"node-wrapper"],[3,"node",4,"ngIf"],[3,"node"],[1,"node-content-wrapper",3,"treeAllowDrop","allowDragoverStyling","treeDrag","treeDragEnabled","click","dblclick","mouseover","mouseout","contextmenu","treeDrop","treeDropDragOver","treeDropDragLeave","treeDropDragEnter"],[3,"node","index","template"]],template:function(j,Ae){1&j&&(r.YNc(0,di,5,15,"div",0),r.GkF(1,1)),2&j&&(r.Q6J("ngIf",!Ae.templates.treeNodeWrapperTemplate),r.xp6(1),r.Q6J("ngTemplateOutlet",Ae.templates.treeNodeWrapperTemplate)("ngTemplateOutletContext",r.l5B(3,Y,Ae.node,Ae.node,Ae.index,Ae.templates)))},dependencies:function(){return[a.O5,a.tP,kg,_h,bg,$g,M1]},encapsulation:2}),L})(),M1=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275cmp=r.Xpm({type:L,selectors:[["tree-node-checkbox"]],inputs:{node:"node"},decls:1,vars:2,consts:[[4,"treeMobxAutorun"],["type","checkbox",1,"tree-node-checkbox",3,"checked","indeterminate","click"]],template:function(j,Ae){1&j&&r.YNc(0,Wr,2,2,"ng-container",0),2&j&&r.Q6J("treeMobxAutorun",r.DdM(1,z))},dependencies:[si],encapsulation:2}),L})();const Eg="is-dragging-over",Sg="is-dragging-over-disabled";let _h=(()=>{class 
L{constructor(j,Ae,St,Kt){this.el=j,this.renderer=Ae,this.treeDraggedElement=St,this.ngZone=Kt,this.allowDragoverStyling=!0,this.onDropCallback=new r.vpe,this.onDragOverCallback=new r.vpe,this.onDragLeaveCallback=new r.vpe,this.onDragEnterCallback=new r.vpe,this._allowDrop=(ur,Br)=>!0,this.dragOverEventHandler=this.onDragOver.bind(this),this.dragEnterEventHandler=this.onDragEnter.bind(this),this.dragLeaveEventHandler=this.onDragLeave.bind(this)}set treeAllowDrop(j){this._allowDrop=j instanceof Function?j:(Ae,St)=>j}allowDrop(j){return this._allowDrop(this.treeDraggedElement.get(),j)}ngAfterViewInit(){let j=this.el.nativeElement;this.ngZone.runOutsideAngular(()=>{j.addEventListener("dragover",this.dragOverEventHandler),j.addEventListener("dragenter",this.dragEnterEventHandler),j.addEventListener("dragleave",this.dragLeaveEventHandler)})}ngOnDestroy(){let j=this.el.nativeElement;j.removeEventListener("dragover",this.dragOverEventHandler),j.removeEventListener("dragenter",this.dragEnterEventHandler),j.removeEventListener("dragleave",this.dragLeaveEventHandler)}onDragOver(j){if(!this.allowDrop(j))return this.allowDragoverStyling?this.addDisabledClass():void 0;this.onDragOverCallback.emit({event:j,element:this.treeDraggedElement.get()}),j.preventDefault(),this.allowDragoverStyling&&this.addClass()}onDragEnter(j){this.allowDrop(j)&&(j.preventDefault(),this.onDragEnterCallback.emit({event:j,element:this.treeDraggedElement.get()}))}onDragLeave(j){if(!this.allowDrop(j))return this.allowDragoverStyling?this.removeDisabledClass():void 0;this.onDragLeaveCallback.emit({event:j,element:this.treeDraggedElement.get()}),this.allowDragoverStyling&&this.removeClass()}onDrop(j){this.allowDrop(j)&&(j.preventDefault(),this.onDropCallback.emit({event:j,element:this.treeDraggedElement.get()}),this.allowDragoverStyling&&this.removeClass(),this.treeDraggedElement.set(null))}addClass(){this.renderer.addClass(this.el.nativeElement,Eg)}removeClass(){this.renderer.removeClass(this.el.nativeElement,Eg)}addDisabledClass(){this.renderer.addClass(this.el.nativeElement,Sg)}removeDisabledClass(){this.renderer.removeClass(this.el.nativeElement,Sg)}}return L.\u0275fac=function(j){return new(j||L)(r.Y36(r.SBq),r.Y36(r.Qsj),r.Y36(km),r.Y36(r.R0b))},L.\u0275dir=r.lG2({type:L,selectors:[["","treeDrop",""]],hostBindings:function(j,Ae){1&j&&r.NdJ("drop",function(Kt){return Ae.onDrop(Kt)})},inputs:{allowDragoverStyling:"allowDragoverStyling",treeAllowDrop:"treeAllowDrop"},outputs:{onDropCallback:"treeDrop",onDragOverCallback:"treeDropDragOver",onDragLeaveCallback:"treeDropDragLeave",onDragEnterCallback:"treeDropDragEnter"}}),L})(),bg=(()=>{class L{constructor(j,Ae,St,Kt){this.el=j,this.renderer=Ae,this.treeDraggedElement=St,this.ngZone=Kt,this.dragEventHandler=this.onDrag.bind(this)}ngAfterViewInit(){let j=this.el.nativeElement;this.ngZone.runOutsideAngular(()=>{j.addEventListener("drag",this.dragEventHandler)})}ngDoCheck(){this.renderer.setAttribute(this.el.nativeElement,"draggable",this.treeDragEnabled?"true":"false")}ngOnDestroy(){this.el.nativeElement.removeEventListener("drag",this.dragEventHandler)}onDragStart(j){j.dataTransfer.setData("text",j.target.id),this.treeDraggedElement.set(this.draggedElement),this.draggedElement.mouseAction&&this.draggedElement.mouseAction("dragStart",j)}onDrag(j){this.draggedElement.mouseAction&&this.draggedElement.mouseAction("drag",j)}onDragEnd(){this.draggedElement.mouseAction&&this.draggedElement.mouseAction("dragEnd"),this.treeDraggedElement.set(null)}}return L.\u0275fac=function(j){return 
new(j||L)(r.Y36(r.SBq),r.Y36(r.Qsj),r.Y36(km),r.Y36(r.R0b))},L.\u0275dir=r.lG2({type:L,selectors:[["","treeDrag",""]],hostBindings:function(j,Ae){1&j&&r.NdJ("dragstart",function(Kt){return Ae.onDragStart(Kt)})("dragend",function(){return Ae.onDragEnd()})},inputs:{draggedElement:["treeDrag","draggedElement"],treeDragEnabled:"treeDragEnabled"}}),L})(),O1=(()=>{class L{constructor(j,Ae,St){this.renderer=j,this.templateRef=Ae,this.viewContainerRef=St}set isOpen(j){j?(this._show(),this.isEnabled&&!1===this._isOpen&&this._animateOpen()):this.isEnabled?this._animateClose():this._hide(),this._isOpen=!!j}_show(){this.innerElement||(this.innerElement=this.viewContainerRef.createEmbeddedView(this.templateRef).rootNodes[0])}_hide(){this.viewContainerRef.clear(),this.innerElement=null}_animateOpen(){let j=this.animateSpeed,Ae=this.animateAcceleration,St=0;this.renderer.setStyle(this.innerElement,"max-height","0"),setTimeout(()=>{const Kt=setInterval(()=>{if(!this._isOpen||!this.innerElement)return clearInterval(Kt);St+=j;const ur=Math.round(St);this.renderer.setStyle(this.innerElement,"max-height",`${ur}px`);const Br=this.innerElement.getBoundingClientRect?this.innerElement.getBoundingClientRect().height:0;j*=Ae,Ae*=1.005,Br<ur&&(this.renderer.setStyle(this.innerElement,"max-height",null),clearInterval(Kt))},17)})}_animateClose(){let j=this.animateSpeed,Ae=this.animateAcceleration,St=this.innerElement.getBoundingClientRect().height;const Kt=setInterval(()=>{if(this._isOpen||!this.innerElement)return clearInterval(Kt);St-=j,this.renderer.setStyle(this.innerElement,"max-height",`${St}px`),j*=Ae,Ae*=1.005,St<=0&&(this.viewContainerRef.clear(),this.innerElement=null,clearInterval(Kt))},17)}}return L.\u0275fac=function(j){return new(j||L)(r.Y36(r.Qsj),r.Y36(r.Rgc),r.Y36(r.s_b))},L.\u0275dir=r.lG2({type:L,selectors:[["","treeAnimateOpen",""]],inputs:{isOpen:["treeAnimateOpen","isOpen"],animateSpeed:["treeAnimateOpenSpeed","animateSpeed"],animateAcceleration:["treeAnimateOpenAcceleration","animateAcceleration"],isEnabled:["treeAnimateOpenEnabled","isEnabled"]}}),L})(),Av=(()=>{class L{}return L.\u0275fac=function(j){return new(j||L)},L.\u0275mod=r.oAB({type:L}),L.\u0275inj=r.cJS({imports:[a.ez]}),L})()},84051:(E,C,s)=>{"use strict";s.d(C,{$7:()=>wr,AR:()=>mn,Hg:()=>jt,Sr:()=>Ro,dX:()=>Ti,ii:()=>ii,nE:()=>da,vq:()=>dn,xD:()=>Fo});var r=s(64537),a=s(88692),c=s(79765),u=s(22759),e=s(26215),f=s(46782),m=s(64762);const T=["*"];function M(gt,Tn){1&gt&&r._UZ(0,"datatable-progress")}function w(gt,Tn){if(1&gt&&r._UZ(0,"datatable-summary-row",9),2&gt){const ie=r.oxw(2);r.Q6J("rowHeight",ie.summaryHeight)("offsetX",ie.offsetX)("innerWidth",ie.innerWidth)("rows",ie.rows)("columns",ie.columns)}}function D(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-body-row",13),r.NdJ("treeAction",function(){r.CHM(ie);const Jt=r.oxw().$implicit,gn=r.oxw(2);return r.KtG(gn.onTreeAction(Jt))})("activate",function(Jt){r.CHM(ie);const gn=r.oxw().index,vi=r.oxw(2),Bi=r.MAs(2);return r.KtG(Bi.onActivate(Jt,vi.indexes.first+gn))}),r.qZA()}if(2&gt){const ie=r.oxw().$implicit,Ze=r.oxw(2),Jt=r.MAs(2);r.Q6J("isSelected",Jt.getRowSelected(ie))("innerWidth",Ze.innerWidth)("offsetX",Ze.offsetX)("columns",Ze.columns)("rowHeight",Ze.getRowHeight(ie))("row",ie)("rowIndex",Ze.getRowIndex(ie))("expanded",Ze.getRowExpanded(ie))("rowClass",Ze.rowClass)("displayCheck",Ze.displayCheck)("treeStatus",ie&&ie.treeStatus)}}function U(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-body-row",15),r.NdJ("activate",function(Jt){const vi=r.CHM(ie).index;r.oxw(4);const Bi=r.MAs(2);return r.KtG(Bi.onActivate(Jt,vi))}),r.qZA()}if(2&gt){const
ie=Tn.$implicit,Ze=r.oxw(2).$implicit,Jt=r.oxw(2),gn=r.MAs(2);r.Q6J("isSelected",gn.getRowSelected(ie))("innerWidth",Jt.innerWidth)("offsetX",Jt.offsetX)("columns",Jt.columns)("rowHeight",Jt.getRowHeight(ie))("row",ie)("group",Ze.value)("rowIndex",Jt.getRowIndex(ie))("expanded",Jt.getRowExpanded(ie))("rowClass",Jt.rowClass)}function W(gt,Tn){if(1&gt&&r.YNc(0,U,1,10,"datatable-body-row",14),2&gt){const ie=r.oxw().$implicit,Ze=r.oxw(2);r.Q6J("ngForOf",ie.value)("ngForTrackBy",Ze.rowTrackingFn)}}function $(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-row-wrapper",10),r.NdJ("rowContextmenu",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.rowContextmenu.emit(Jt))}),r.YNc(1,D,1,11,"datatable-body-row",11),r.YNc(2,W,1,2,"ng-template",null,12,r.W1O),r.qZA()}if(2&gt){const ie=Tn.$implicit,Ze=Tn.index,Jt=r.MAs(3),gn=r.oxw(2);r.Q6J("groupedRows",gn.groupedRows)("innerWidth",gn.innerWidth)("ngStyle",gn.getRowsStyles(ie))("rowDetail",gn.rowDetail)("groupHeader",gn.groupHeader)("offsetX",gn.offsetX)("detailRowHeight",gn.getDetailRowHeight(ie&&ie[Ze],Ze))("row",ie)("expanded",gn.getRowExpanded(ie))("rowIndex",gn.getRowIndex(ie&&ie[Ze])),r.xp6(1),r.Q6J("ngIf",!gn.groupedRows)("ngIfElse",Jt)}}function J(gt,Tn){if(1&gt&&r._UZ(0,"datatable-summary-row",16),2&gt){const ie=r.oxw(2);r.Q6J("ngStyle",ie.getBottomSummaryRowStyles())("rowHeight",ie.summaryHeight)("offsetX",ie.offsetX)("innerWidth",ie.innerWidth)("rows",ie.rows)("columns",ie.columns)}}function F(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-scroller",5),r.NdJ("scroll",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onBodyScroll(Jt))}),r.YNc(1,w,1,5,"datatable-summary-row",6),r.YNc(2,$,4,12,"datatable-row-wrapper",7),r.YNc(3,J,1,6,"datatable-summary-row",8),r.qZA()}if(2&gt){const ie=r.oxw();r.Q6J("scrollbarV",ie.scrollbarV)("scrollbarH",ie.scrollbarH)("scrollHeight",ie.scrollHeight)("scrollWidth",null==ie.columnGroupWidths?null:ie.columnGroupWidths.total),r.xp6(1),r.Q6J("ngIf",ie.summaryRow&&"top"===ie.summaryPosition),r.xp6(1),r.Q6J("ngForOf",ie.temp)("ngForTrackBy",ie.rowTrackingFn),r.xp6(1),r.Q6J("ngIf",ie.summaryRow&&"bottom"===ie.summaryPosition)}}function X(gt,Tn){if(1&gt&&r._UZ(0,"div",17),2&gt){const ie=r.oxw();r.Q6J("innerHTML",ie.emptyMessage,r.oJD)}}function de(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-header-cell",4),r.NdJ("resize",function(Jt){const vi=r.CHM(ie).$implicit,Bi=r.oxw(2);return r.KtG(Bi.onColumnResized(Jt,vi))})("longPressStart",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.onLongPressStart(Jt))})("longPressEnd",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.onLongPressEnd(Jt))})("sort",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.onSort(Jt))})("select",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.select.emit(Jt))})("columnContextmenu",function(Jt){r.CHM(ie);const gn=r.oxw(2);return r.KtG(gn.columnContextmenu.emit(Jt))}),r.qZA()}if(2&gt){const
ie=Tn.$implicit,Ze=r.oxw(2);r.Q6J("resizeEnabled",ie.resizeable)("pressModel",ie)("pressEnabled",Ze.reorderable&&ie.draggable)("dragX",Ze.reorderable&&ie.draggable&&ie.dragging)("dragY",!1)("dragModel",ie)("dragEventTarget",Ze.dragEventTarget)("headerHeight",Ze.headerHeight)("isTarget",ie.isTarget)("targetMarkerTemplate",Ze.targetMarkerTemplate)("targetMarkerContext",ie.targetMarkerContext)("column",ie)("sortType",Ze.sortType)("sorts",Ze.sorts)("selectionType",Ze.selectionType)("sortAscendingIcon",Ze.sortAscendingIcon)("sortDescendingIcon",Ze.sortDescendingIcon)("sortUnsetIcon",Ze.sortUnsetIcon)("allRowsSelected",Ze.allRowsSelected)}function V(gt,Tn){if(1&gt&&(r.TgZ(0,"div",2),r.YNc(1,de,1,19,"datatable-header-cell",3),r.qZA()),2&gt){const ie=Tn.$implicit,Ze=r.oxw();r.Tol("datatable-row-"+ie.type),r.Q6J("ngStyle",Ze._styleByGroup[ie.type]),r.xp6(1),r.Q6J("ngForOf",ie.columns)("ngForTrackBy",Ze.columnTrackingFn)}}function ce(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-header",4),r.NdJ("sort",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onColumnSort(Jt))})("resize",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onColumnResize(Jt))})("reorder",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onColumnReorder(Jt))})("select",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onHeaderSelect(Jt))})("columnContextmenu",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onColumnContextmenu(Jt))}),r.ALo(1,"async"),r.qZA()}if(2&gt){const ie=r.oxw();r.Q6J("sorts",ie.sorts)("sortType",ie.sortType)("scrollbarH",ie.scrollbarH)("innerWidth",ie._innerWidth)("offsetX",r.lcZ(1,15,ie._offsetX))("dealsWithGroup",void 0!==ie.groupedRows)("columns",ie._internalColumns)("headerHeight",ie.headerHeight)("reorderable",ie.reorderable)("targetMarkerTemplate",ie.targetMarkerTemplate)("sortAscendingIcon",ie.cssClasses.sortAscending)("sortDescendingIcon",ie.cssClasses.sortDescending)("sortUnsetIcon",ie.cssClasses.sortUnset)("allRowsSelected",ie.allRowsSelected)("selectionType",ie.selectionType)}}function se(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-footer",5),r.NdJ("page",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onFooterPage(Jt))}),r.qZA()}if(2&gt){const ie=r.oxw();r.Q6J("rowCount",ie.rowCount)("pageSize",ie.pageSize)("offset",ie.offset)("footerHeight",ie.footerHeight)("footerTemplate",ie.footer)("totalMessage",ie.messages.totalMessage)("pagerLeftArrowIcon",ie.cssClasses.pagerLeftArrow)("pagerRightArrowIcon",ie.cssClasses.pagerRightArrow)("pagerPreviousIcon",ie.cssClasses.pagerPrevious)("selectedCount",ie.selected.length)("selectedMessage",!!ie.selectionType&&ie.messages.selectedMessage)("pagerNextIcon",ie.cssClasses.pagerNext)}}function fe(gt,Tn){}function Te(gt,Tn){if(1&gt&&r.YNc(0,fe,0,0,"ng-template",5),2&gt){const ie=r.oxw();r.Q6J("ngTemplateOutlet",ie.targetMarkerTemplate)("ngTemplateOutletContext",ie.targetMarkerContext)}}function $e(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"label",6)(1,"input",7),r.NdJ("change",function(){r.CHM(ie);const Jt=r.oxw();return r.KtG(Jt.select.emit(!Jt.allRowsSelected))}),r.qZA()()}if(2&gt){const ie=r.oxw();r.xp6(1),r.Q6J("checked",ie.allRowsSelected)}}function ge(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"span",8)(1,"span",9),r.NdJ("click",function(){r.CHM(ie);const Jt=r.oxw();return r.KtG(Jt.onSort())}),r.qZA()()}if(2&gt){const ie=r.oxw();r.xp6(1),r.Q6J("innerHTML",ie.name,r.oJD)}}function Et(gt,Tn){}function ot(gt,Tn){if(1&gt&&r.YNc(0,Et,0,0,"ng-template",5),2&gt){const
ie=r.oxw();r.Q6J("ngTemplateOutlet",ie.column.headerTemplate)("ngTemplateOutletContext",ie.cellContext)}function ct(gt,Tn){}const qe=function(gt,Tn,ie,Ze,Jt){return{rowCount:gt,pageSize:Tn,selectedCount:ie,curPage:Ze,offset:Jt}};function He(gt,Tn){if(1&gt&&r.YNc(0,ct,0,0,"ng-template",4),2&gt){const ie=r.oxw();r.Q6J("ngTemplateOutlet",ie.footerTemplate.template)("ngTemplateOutletContext",r.qbA(2,qe,ie.rowCount,ie.pageSize,ie.selectedCount,ie.curPage,ie.offset))}}function We(gt,Tn){if(1&gt&&(r.TgZ(0,"span"),r._uU(1),r.qZA()),2&gt){const ie=r.oxw(2);r.xp6(1),r.AsE(" ",null==ie.selectedCount?null:ie.selectedCount.toLocaleString()," ",ie.selectedMessage," / ")}}function Le(gt,Tn){if(1&gt&&(r.TgZ(0,"div",5),r.YNc(1,We,2,2,"span",1),r._uU(2),r.qZA()),2&gt){const ie=r.oxw();r.xp6(1),r.Q6J("ngIf",ie.selectedMessage),r.xp6(1),r.AsE(" ",null==ie.rowCount?null:ie.rowCount.toLocaleString()," ",ie.totalMessage," ")}}function Pt(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-pager",6),r.NdJ("change",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.page.emit(Jt))}),r.qZA()}if(2&gt){const ie=r.oxw();r.Q6J("pagerLeftArrowIcon",ie.pagerLeftArrowIcon)("pagerRightArrowIcon",ie.pagerRightArrowIcon)("pagerPreviousIcon",ie.pagerPreviousIcon)("pagerNextIcon",ie.pagerNextIcon)("page",ie.curPage)("size",ie.pageSize)("count",ie.rowCount)("hidden",!ie.isVisible)}}const it=function(gt){return{"selected-count":gt}};function Xt(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"li",6)(1,"a",7),r.NdJ("click",function(){const gn=r.CHM(ie).$implicit,vi=r.oxw();return r.KtG(vi.selectPage(gn.number))}),r._uU(2),r.qZA()()}if(2&gt){const ie=Tn.$implicit,Ze=r.oxw();r.ekj("active",ie.number===Ze.page),r.uIk("aria-label","page "+ie.number),r.xp6(2),r.hij(" ",ie.text," ")}}function cn(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"datatable-body-cell",3),r.NdJ("activate",function(Jt){const vi=r.CHM(ie).index,Bi=r.oxw(2);return r.KtG(Bi.onActivate(Jt,vi))})("treeAction",function(){r.CHM(ie);const Jt=r.oxw(2);return r.KtG(Jt.onTreeAction())}),r.qZA()}if(2&gt){const ie=Tn.$implicit,Ze=r.oxw(2);r.Q6J("row",Ze.row)("group",Ze.group)("expanded",Ze.expanded)("isSelected",Ze.isSelected)("rowIndex",Ze.rowIndex)("column",ie)("rowHeight",Ze.rowHeight)("displayCheck",Ze.displayCheck)("treeStatus",Ze.treeStatus)}}function pn(gt,Tn){if(1&gt&&(r.TgZ(0,"div",1),r.YNc(1,cn,1,9,"datatable-body-cell",2),r.qZA()),2&gt){const ie=Tn.$implicit,Ze=r.oxw();r.Gre("datatable-row-",ie.type," datatable-row-group"),r.Q6J("ngStyle",Ze._groupStyles[ie.type]),r.xp6(1),r.Q6J("ngForOf",ie.columns)("ngForTrackBy",Ze.columnTrackingFn)}}function Rn(gt,Tn){}function At(gt,Tn){if(1&gt&&r.YNc(0,Rn,0,0,"ng-template",4),2&gt){const ie=r.oxw(2);r.Q6J("ngTemplateOutlet",ie.groupHeader.template)("ngTemplateOutletContext",ie.groupContext)}}function qt(gt,Tn){if(1&gt&&(r.TgZ(0,"div",3),r.YNc(1,At,1,2,null,1),r.qZA()),2&gt){const ie=r.oxw();r.Q6J("ngStyle",ie.getGroupHeaderStyle()),r.xp6(1),r.Q6J("ngIf",ie.groupHeader&&ie.groupHeader.template)}}function sn(gt,Tn){1&gt&&r.Hsn(0,0,["*ngIf","(groupHeader && groupHeader.template && expanded) || !groupHeader || !groupHeader.template"])}function fn(gt,Tn){}function xn(gt,Tn){if(1&gt&&r.YNc(0,fn,0,0,"ng-template",4),2&gt){const ie=r.oxw(2);r.Q6J("ngTemplateOutlet",ie.rowDetail.template)("ngTemplateOutletContext",ie.rowContext)}}function Kr(gt,Tn){if(1&gt&&(r.TgZ(0,"div",5),r.YNc(1,xn,1,2,null,1),r.qZA()),2&gt){const ie=r.oxw();r.Udp("height",ie.detailRowHeight,"px"),r.xp6(1),r.Q6J("ngIf",ie.rowDetail&&ie.rowDetail.template)}}const Or=["cellTemplate"];function Lr(gt,Tn){if(1&gt){const
ie=r.EpF();r.TgZ(0,"label",4)(1,"input",5),r.NdJ("click",function(Jt){r.CHM(ie);const gn=r.oxw();return r.KtG(gn.onCheckboxChange(Jt))}),r.qZA()()}if(2&gt){const ie=r.oxw();r.xp6(1),r.Q6J("checked",ie.isSelected)}}function ir(gt,Tn){1&gt&&r._UZ(0,"i",11)}function Qr(gt,Tn){1&gt&&r._UZ(0,"i",12)}function jr(gt,Tn){1&gt&&r._UZ(0,"i",13)}function br(gt,Tn){if(1&gt){const ie=r.EpF();r.TgZ(0,"button",7),r.NdJ("click",function(){r.CHM(ie);const Jt=r.oxw(2);return r.KtG(Jt.onTreeAction())}),r.TgZ(1,"span"),r.YNc(2,ir,1,0,"i",8),r.YNc(3,Qr,1,0,"i",9),r.YNc(4,jr,1,0,"i",10),r.qZA()()}if(2&gt){const ie=r.oxw(2);r.Q6J("disabled","disabled"===ie.treeStatus),r.xp6(2),r.Q6J("ngIf","loading"===ie.treeStatus),r.xp6(1),r.Q6J("ngIf","collapsed"===ie.treeStatus),r.xp6(1),r.Q6J("ngIf","expanded"===ie.treeStatus||"disabled"===ie.treeStatus)}}function ht(gt,Tn){}const Wt=function(gt){return{cellContext:gt}};function Tt(gt,Tn){if(1&gt&&r.YNc(0,ht,0,0,"ng-template",14),2&gt){const ie=r.oxw(2);r.Q6J("ngTemplateOutlet",ie.column.treeToggleTemplate)("ngTemplateOutletContext",r.VKq(2,Wt,ie.cellContext))}}function wn(gt,Tn){if(1&gt&&(r.ynx(0),r.YNc(1,br,5,4,"button",6),r.YNc(2,Tt,1,4,null,2),r.BQk()),2&gt){const ie=r.oxw();r.xp6(1),r.Q6J("ngIf",!ie.column.treeToggleTemplate),r.xp6(1),r.Q6J("ngIf",ie.column.treeToggleTemplate)}}function jn(gt,Tn){if(1&gt&&r._UZ(0,"span",15),2&gt){const ie=r.oxw();r.Q6J("title",ie.sanitizedValue)("innerHTML",ie.value,r.oJD)}}function hr(gt,Tn){}function Oi(gt,Tn){if(1&gt&&r.YNc(0,hr,0,0,"ng-template",14,16,r.W1O),2&gt){const ie=r.oxw();r.Q6J("ngTemplateOutlet",ie.column.cellTemplate)("ngTemplateOutletContext",ie.cellContext)}}function Wi(gt,Tn){if(1&gt&&r._UZ(0,"datatable-body-row",1),2&gt){const ie=r.oxw();r.Q6J("innerWidth",ie.innerWidth)("offsetX",ie.offsetX)("columns",ie._internalColumns)("rowHeight",ie.rowHeight)("row",ie.summaryRow)("rowIndex",-1)}}let so=(()=>{class gt{constructor(ie){this.document=ie,this.width=this.getWidth()}getWidth(){const ie=this.document.createElement("div");ie.style.visibility="hidden",ie.style.width="100px",ie.style.msOverflowStyle="scrollbar",this.document.body.appendChild(ie);const Ze=ie.offsetWidth;ie.style.overflow="scroll";const Jt=this.document.createElement("div");Jt.style.width="100%",ie.appendChild(Jt);const gn=Jt.offsetWidth;return ie.parentNode.removeChild(ie),Ze-gn}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.LFG(a.K0))},gt.\u0275prov=r.Yz7({token:gt,factory:gt.\u0275fac}),gt})(),kr=(()=>{class gt{getDimensions(ie){return ie.getBoundingClientRect()}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275prov=r.Yz7({token:gt,factory:gt.\u0275fac}),gt})(),Ei=(()=>{class gt{constructor(){this.columnInputChanges=new c.xQ}get columnInputChanges$(){return this.columnInputChanges.asObservable()}onInputChange(){this.columnInputChanges.next()}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275prov=r.Yz7({token:gt,factory:gt.\u0275fac}),gt})(),ii=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-footer-template",""]]}),gt})(),mr=(()=>{class gt{constructor(ie,Ze){this.element=ie,this.zone=Ze,this.isVisible=!1,this.visible=new r.vpe}ngOnInit(){this.runCheck()}ngOnDestroy(){clearTimeout(this.timeout)}onVisibilityChange(){this.zone.run(()=>{this.isVisible=!0,this.visible.emit(!0)})}runCheck(){const
ie=()=>{const{offsetHeight:Ze,offsetWidth:Jt}=this.element.nativeElement;Ze&&Jt?(clearTimeout(this.timeout),this.onVisibilityChange()):(clearTimeout(this.timeout),this.zone.runOutsideAngular(()=>{this.timeout=setTimeout(()=>ie(),50)}))};this.timeout=setTimeout(()=>ie())}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.SBq),r.Y36(r.R0b))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","visibilityObserver",""]],hostVars:2,hostBindings:function(ie,Ze){2&ie&&r.ekj("visible",Ze.isVisible)},outputs:{visible:"visible"}}),gt})(),pr=(()=>{class gt{constructor(ie){this.dragX=!0,this.dragY=!0,this.dragStart=new r.vpe,this.dragging=new r.vpe,this.dragEnd=new r.vpe,this.isDragging=!1,this.element=ie.nativeElement}ngOnChanges(ie){ie.dragEventTarget&&ie.dragEventTarget.currentValue&&this.dragModel.dragging&&this.onMousedown(ie.dragEventTarget.currentValue)}ngOnDestroy(){this._destroySubscription()}onMouseup(ie){this.isDragging&&(this.isDragging=!1,this.element.classList.remove("dragging"),this.subscription&&(this._destroySubscription(),this.dragEnd.emit({event:ie,element:this.element,model:this.dragModel})))}onMousedown(ie){if(ie.target.classList.contains("draggable")&&(this.dragX||this.dragY)){ie.preventDefault(),this.isDragging=!0;const Jt={x:ie.clientX,y:ie.clientY},gn=(0,u.R)(document,"mouseup");this.subscription=gn.subscribe(Bi=>this.onMouseup(Bi));const vi=(0,u.R)(document,"mousemove").pipe((0,f.R)(gn)).subscribe(Bi=>this.move(Bi,Jt));this.subscription.add(vi),this.dragStart.emit({event:ie,element:this.element,model:this.dragModel})}}move(ie,Ze){if(!this.isDragging)return;const gn=ie.clientY-Ze.y;this.dragX&&(this.element.style.left=ie.clientX-Ze.x+"px"),this.dragY&&(this.element.style.top=`${gn}px`),this.element.classList.add("dragging"),this.dragging.emit({event:ie,element:this.element,model:this.dragModel})}_destroySubscription(){this.subscription&&(this.subscription.unsubscribe(),this.subscription=void 0)}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.SBq))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","draggable",""]],inputs:{dragX:"dragX",dragY:"dragY",dragEventTarget:"dragEventTarget",dragModel:"dragModel"},outputs:{dragStart:"dragStart",dragging:"dragging",dragEnd:"dragEnd"},features:[r.TTD]}),gt})(),Eo=(()=>{class gt{constructor(ie,Ze){this.renderer=Ze,this.resizeEnabled=!0,this.resize=new r.vpe,this.resizing=!1,this.element=ie.nativeElement}ngAfterViewInit(){const ie=this.renderer;this.resizeHandle=ie.createElement("span"),ie.addClass(this.resizeHandle,this.resizeEnabled?"resize-handle":"resize-handle--not-resizable"),ie.appendChild(this.element,this.resizeHandle)}ngOnDestroy(){this._destroySubscription(),this.renderer.destroyNode?this.renderer.destroyNode(this.resizeHandle):this.resizeHandle&&this.renderer.removeChild(this.renderer.parentNode(this.resizeHandle),this.resizeHandle)}onMouseup(){this.resizing=!1,this.subscription&&!this.subscription.closed&&(this._destroySubscription(),this.resize.emit(this.element.clientWidth))}onMousedown(ie){const Ze=ie.target.classList.contains("resize-handle"),Jt=this.element.clientWidth,gn=ie.screenX;if(Ze){ie.stopPropagation(),this.resizing=!0;const vi=(0,u.R)(document,"mouseup");this.subscription=vi.subscribe(Xi=>this.onMouseup());const Bi=(0,u.R)(document,"mousemove").pipe((0,f.R)(vi)).subscribe(Xi=>this.move(Xi,Jt,gn));this.subscription.add(Bi)}}move(ie,Ze,Jt){const 
vi=Ze+(ie.screenX-Jt);(!this.minWidth||vi>=this.minWidth)&&(!this.maxWidth||vi<=this.maxWidth)&&(this.element.style.width=`${vi}px`)}_destroySubscription(){this.subscription&&(this.subscription.unsubscribe(),this.subscription=void 0)}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.SBq),r.Y36(r.Qsj))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","resizeable",""]],hostVars:2,hostBindings:function(ie,Ze){1&ie&&r.NdJ("mousedown",function(gn){return Ze.onMousedown(gn)}),2&ie&&r.ekj("resizeable",Ze.resizeEnabled)},inputs:{resizeEnabled:"resizeEnabled",minWidth:"minWidth",maxWidth:"maxWidth"},outputs:{resize:"resize"}}),gt})(),po=(()=>{class gt{constructor(ie,Ze){this.document=Ze,this.reorder=new r.vpe,this.targetChanged=new r.vpe,this.differ=ie.find({}).create()}ngAfterContentInit(){this.updateSubscriptions(),this.draggables.changes.subscribe(this.updateSubscriptions.bind(this))}ngOnDestroy(){this.draggables.forEach(ie=>{ie.dragStart.unsubscribe(),ie.dragging.unsubscribe(),ie.dragEnd.unsubscribe()})}updateSubscriptions(){const ie=this.differ.diff(this.createMapDiffs());if(ie){const Ze=({currentValue:gn,previousValue:vi})=>{Jt({previousValue:vi}),gn&&(gn.dragStart.subscribe(this.onDragStart.bind(this)),gn.dragging.subscribe(this.onDragging.bind(this)),gn.dragEnd.subscribe(this.onDragEnd.bind(this)))},Jt=({previousValue:gn})=>{gn&&(gn.dragStart.unsubscribe(),gn.dragging.unsubscribe(),gn.dragEnd.unsubscribe())};ie.forEachAddedItem(Ze),ie.forEachRemovedItem(Jt)}}onDragStart(){this.positions={};let ie=0;for(const Ze of this.draggables.toArray()){const Jt=Ze.element,gn=parseInt(Jt.offsetLeft.toString(),0);this.positions[Ze.dragModel.prop]={left:gn,right:gn+parseInt(Jt.offsetWidth.toString(),0),index:ie++,element:Jt}}}onDragging({model:Ze,event:Jt}){const gn=this.positions[Ze.prop],vi=this.isTarget(Ze,Jt);vi?this.lastDraggingIndex!==vi.i&&(this.targetChanged.emit({prevIndex:this.lastDraggingIndex,newIndex:vi.i,initialIndex:gn.index}),this.lastDraggingIndex=vi.i):this.lastDraggingIndex!==gn.index&&(this.targetChanged.emit({prevIndex:this.lastDraggingIndex,initialIndex:gn.index}),this.lastDraggingIndex=gn.index)}onDragEnd({element:ie,model:Ze,event:Jt}){const gn=this.positions[Ze.prop],vi=this.isTarget(Ze,Jt);vi&&this.reorder.emit({prevIndex:gn.index,newIndex:vi.i,model:Ze}),this.lastDraggingIndex=void 0,ie.style.left="auto"}isTarget(ie,Ze){let Jt=0;const Bi=this.document.elementsFromPoint(Ze.x||Ze.clientX,Ze.y||Ze.clientY);for(const Xi in this.positions){const ws=this.positions[Xi];if(ie.prop!==Xi&&Bi.find(ds=>ds===ws.element))return{pos:ws,i:Jt};Jt++}}createMapDiffs(){return this.draggables.toArray().reduce((ie,Ze)=>(ie[Ze.dragModel.$$id]=Ze,ie),{})}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.aQg),r.Y36(a.K0))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","orderable",""]],contentQueries:function(ie,Ze,Jt){if(1&ie&&r.Suo(Jt,pr,5),2&ie){let gn;r.iGM(gn=r.CRH())&&(Ze.draggables=gn)}},outputs:{reorder:"reorder",targetChanged:"targetChanged"}}),gt})(),$i=(()=>{class gt{constructor(){this.pressEnabled=!0,this.duration=500,this.longPressStart=new r.vpe,this.longPressing=new r.vpe,this.longPressEnd=new r.vpe,this.mouseX=0,this.mouseY=0}get press(){return this.pressing}get isLongPress(){return this.isLongPressing}onMouseDown(ie){if(1!==ie.which||!this.pressEnabled||ie.target.classList.contains("resize-handle"))return;this.mouseX=ie.clientX,this.mouseY=ie.clientY,this.pressing=!0,this.isLongPressing=!1;const 
Jt=(0,u.R)(document,"mouseup");this.subscription=Jt.subscribe(gn=>this.onMouseup()),this.timeout=setTimeout(()=>{this.isLongPressing=!0,this.longPressStart.emit({event:ie,model:this.pressModel}),this.subscription.add((0,u.R)(document,"mousemove").pipe((0,f.R)(Jt)).subscribe(gn=>this.onMouseMove(gn))),this.loop(ie)},this.duration),this.loop(ie)}onMouseMove(ie){if(this.pressing&&!this.isLongPressing){const Ze=Math.abs(ie.clientX-this.mouseX)>10,Jt=Math.abs(ie.clientY-this.mouseY)>10;(Ze||Jt)&&this.endPress()}}loop(ie){this.isLongPressing&&(this.timeout=setTimeout(()=>{this.longPressing.emit({event:ie,model:this.pressModel}),this.loop(ie)},50))}endPress(){clearTimeout(this.timeout),this.isLongPressing=!1,this.pressing=!1,this._destroySubscription(),this.longPressEnd.emit({model:this.pressModel})}onMouseup(){this.endPress()}ngOnDestroy(){this._destroySubscription()}_destroySubscription(){this.subscription&&(this.subscription.unsubscribe(),this.subscription=void 0)}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275dir=r.lG2({type:gt,selectors:[["","long-press",""]],hostVars:4,hostBindings:function(ie,Ze){1&ie&&r.NdJ("mousedown",function(gn){return Ze.onMouseDown(gn)}),2&ie&&r.ekj("press",Ze.press)("longpress",Ze.isLongPress)},inputs:{pressEnabled:"pressEnabled",duration:"duration",pressModel:"pressModel"},outputs:{longPressStart:"longPressStart",longPressing:"longPressing",longPressEnd:"longPressEnd"}}),gt})(),qr=(()=>{class gt{constructor(ie,Ze,Jt){this.ngZone=ie,this.renderer=Jt,this.scrollbarV=!1,this.scrollbarH=!1,this.scroll=new r.vpe,this.scrollYPos=0,this.scrollXPos=0,this.prevScrollYPos=0,this.prevScrollXPos=0,this._scrollEventListener=null,this.element=Ze.nativeElement}ngOnInit(){if(this.scrollbarV||this.scrollbarH){const ie=this.renderer;this.parentElement=ie.parentNode(ie.parentNode(this.element)),this._scrollEventListener=this.onScrolled.bind(this),this.parentElement.addEventListener("scroll",this._scrollEventListener)}}ngOnDestroy(){this._scrollEventListener&&(this.parentElement.removeEventListener("scroll",this._scrollEventListener),this._scrollEventListener=null)}setOffset(ie){this.parentElement&&(this.parentElement.scrollTop=ie)}onScrolled(ie){const Ze=ie.currentTarget;requestAnimationFrame(()=>{this.scrollYPos=Ze.scrollTop,this.scrollXPos=Ze.scrollLeft,this.updateOffset()})}updateOffset(){let ie;this.scrollYPos<this.prevScrollYPos?ie="down":this.scrollYPos>this.prevScrollYPos&&(ie="up"),this.scroll.emit({direction:ie,scrollYPos:this.scrollYPos,scrollXPos:this.scrollXPos}),this.prevScrollYPos=this.scrollYPos,this.prevScrollXPos=this.scrollXPos}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.R0b),r.Y36(r.SBq),r.Y36(r.Qsj))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-scroller"]],hostAttrs:[1,"datatable-scroll"],hostVars:4,hostBindings:function(ie,Ze){2&ie&&r.Udp("height",Ze.scrollHeight,"px")("width",Ze.scrollWidth,"px")},inputs:{scrollbarV:"scrollbarV",scrollbarH:"scrollbarH",scrollHeight:"scrollHeight",scrollWidth:"scrollWidth"},outputs:{scroll:"scroll"},ngContentSelectors:T,decls:1,vars:0,template:function(ie,Ze){1&ie&&(r.F$t(),r.Hsn(0))},encapsulation:2,changeDetection:0}),gt})(),Hi=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-group-header-template",""]]}),gt})(),Dn=(()=>{class gt{constructor(){this.rowHeight=0,this.toggle=new r.vpe}get template(){return
this._templateInput||this._templateQuery}toggleExpandGroup(ie){this.toggle.emit({type:"group",value:ie})}expandAllGroups(){this.toggle.emit({type:"all",value:!0})}collapseAllGroups(){this.toggle.emit({type:"all",value:!1})}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275dir=r.lG2({type:gt,selectors:[["ngx-datatable-group-header"]],contentQueries:function(ie,Ze,Jt){if(1&ie&&r.Suo(Jt,Hi,7,r.Rgc),2&ie){let gn;r.iGM(gn=r.CRH())&&(Ze._templateQuery=gn.first)}},inputs:{rowHeight:"rowHeight",_templateInput:["template","_templateInput"]},outputs:{toggle:"toggle"}}),gt})();function Hn(){return""}function jt(gt){return null==gt?Hn:"number"==typeof gt?Fe:-1!==gt.indexOf(".")?et:Ie}function Fe(gt,Tn){return null==gt?"":gt&&null!=Tn?gt[Tn]??"":gt}function Ie(gt,Tn){return null==gt?"":gt&&Tn?gt[Tn]??"":gt}function et(gt,Tn){if(null==gt)return"";if(!gt||!Tn)return gt;let ie=gt[Tn];if(void 0!==ie)return ie;ie=gt;const Ze=Tn.split(".");if(Ze.length)for(let Jt=0;Jt<Ze.length;Jt++)if(ie=ie[Ze[Jt]],null==ie)return"";return ie}function ze(gt){return gt&&(Tn=>jt(gt)(Tn,gt))}function an(gt,Tn,ie){if(Tn&&ie){const Ze={},Jt=gt.length;let gn=null;Ze[0]=new lt;const vi=gt.reduce((Xi,ws)=>{const ds=ie(ws);return-1===Xi.indexOf(ds)&&Xi.push(ds),Xi},[]);for(let Xi=0;Xi<Jt;Xi++)gn=new lt(gt[Xi]),Ze[ie(gt[Xi])]=gn;for(let Xi=0;Xi<Jt;Xi++){gn=Ze[ie(gt[Xi])];let ws=0;const ds=Tn(gn.row);null!=ds&&vi.indexOf(ds)>-1&&(ws=ds),gn.parent=Ze[ws],gn.row.level=gn.parent.row.level+1,gn.parent.children.push(gn)}let Bi=[];return Ze[0].flatten(function(){Bi=[...Bi,this.row]},!0),Bi}return gt}class lt{constructor(Tn=null){Tn||(Tn={level:-1,treeStatus:"expanded"}),this.row=Tn,this.parent=null,this.children=[]}flatten(Tn,ie){if("expanded"===this.row.treeStatus)for(let Ze=0,Jt=this.children.length;Ze<Jt;Ze++){const gn=this.children[Ze];Tn.apply(gn,Array.prototype.slice.call(arguments,2)),ie&&gn.flatten.apply(gn,arguments)}}}function Rt(gt){return(gt=(gt=gt.replace(/[^a-zA-Z0-9 ]/g," ")).replace(/([a-z](?=[A-Z]))/g,"$1 ")).replace(/([\s]+)([a-zA-Z])/g,(Tn,ie,Ze)=>Ze.toUpperCase())}function Pe(gt){return gt.replace(/([A-Z])/g,Tn=>` ${Tn}`).replace(/^./,Tn=>Tn.toUpperCase())}function gr(gt){if(!gt)return;let Tn=!1;for(const ie of gt)ie.$$id||(ie.$$id=("0000"+(Math.random()*Math.pow(36,4)<<0).toString(36)).slice(-4)),Pn(ie.prop)&&ie.name&&(ie.prop=Rt(ie.name)),ie.$$valueGetter||(ie.$$valueGetter=jt(ie.prop)),!Pn(ie.prop)&&Pn(ie.name)&&(ie.name=Pe(String(ie.prop))),Pn(ie.prop)&&Pn(ie.name)&&(ie.name=""),ie.hasOwnProperty("resizeable")||(ie.resizeable=!0),ie.hasOwnProperty("sortable")||(ie.sortable=!0),ie.hasOwnProperty("draggable")||(ie.draggable=!0),ie.hasOwnProperty("canAutoResize")||(ie.canAutoResize=!0),ie.hasOwnProperty("width")||(ie.width=150),ie.hasOwnProperty("isTreeColumn")&&ie.isTreeColumn&&!Tn?Tn=!0:ie.isTreeColumn=!1}function Pn(gt){return null==gt}var Pr=(()=>{return(gt=Pr||(Pr={})).standard="standard",gt.flex="flex",gt.force="force",Pr;var gt})(),tr=(()=>{return(gt=tr||(tr={})).single="single",gt.multi="multi",gt.multiClick="multiClick",gt.cell="cell",gt.checkbox="checkbox",tr;var gt})(),Zn=(()=>{return(gt=Zn||(Zn={})).single="single",gt.multi="multi",Zn;var gt})(),nr=(()=>{return(gt=nr||(nr={})).header="header",gt.body="body",nr;var gt})();let Zt=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-header-template",""]]}),gt})(),dn=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-cell-template",""]]}),gt})(),Ge=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-tree-toggle",""]]}),gt})(),Ot=(()=>{class gt{constructor(ie){this.columnChangesService=ie,this.isFirstChange=!0}get cellTemplate(){return this._cellTemplateInput||this._cellTemplateQuery}get headerTemplate(){return
this._headerTemplateInput||this._headerTemplateQuery}get treeToggleTemplate(){return this._treeToggleTemplateInput||this._treeToggleTemplateQuery}ngOnChanges(){this.isFirstChange?this.isFirstChange=!1:this.columnChangesService.onInputChange()}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(Ei))},gt.\u0275dir=r.lG2({type:gt,selectors:[["ngx-datatable-column"]],contentQueries:function(ie,Ze,Jt){if(1&ie&&(r.Suo(Jt,dn,7,r.Rgc),r.Suo(Jt,Zt,7,r.Rgc),r.Suo(Jt,Ge,7,r.Rgc)),2&ie){let gn;r.iGM(gn=r.CRH())&&(Ze._cellTemplateQuery=gn.first),r.iGM(gn=r.CRH())&&(Ze._headerTemplateQuery=gn.first),r.iGM(gn=r.CRH())&&(Ze._treeToggleTemplateQuery=gn.first)}},inputs:{name:"name",prop:"prop",frozenLeft:"frozenLeft",frozenRight:"frozenRight",flexGrow:"flexGrow",resizeable:"resizeable",comparator:"comparator",pipe:"pipe",sortable:"sortable",draggable:"draggable",canAutoResize:"canAutoResize",minWidth:"minWidth",width:"width",maxWidth:"maxWidth",checkboxable:"checkboxable",headerCheckboxable:"headerCheckboxable",headerClass:"headerClass",cellClass:"cellClass",isTreeColumn:"isTreeColumn",treeLevelIndent:"treeLevelIndent",summaryFunc:"summaryFunc",summaryTemplate:"summaryTemplate",_cellTemplateInput:["cellTemplate","_cellTemplateInput"],_headerTemplateInput:["headerTemplate","_headerTemplateInput"],_treeToggleTemplateInput:["treeToggleTemplate","_treeToggleTemplateInput"]},features:[r.TTD]}),gt})(),mn=(()=>{class gt{constructor(ie){this.template=ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.Rgc))},gt.\u0275dir=r.lG2({type:gt,selectors:[["","ngx-datatable-row-detail-template",""]]}),gt})(),wr=(()=>{class gt{constructor(){this.rowHeight=0,this.toggle=new r.vpe}get template(){return this._templateInput||this._templateQuery}toggleExpandRow(ie){this.toggle.emit({type:"row",value:ie})}expandAllRows(){this.toggle.emit({type:"all",value:!0})}collapseAllRows(){this.toggle.emit({type:"all",value:!1})}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275dir=r.lG2({type:gt,selectors:[["ngx-datatable-row-detail"]],contentQueries:function(ie,Ze,Jt){if(1&ie&&r.Suo(Jt,mn,7,r.Rgc),2&ie){let gn;r.iGM(gn=r.CRH())&&(Ze._templateQuery=gn.first)}},inputs:{rowHeight:"rowHeight",_templateInput:["template","_templateInput"]},outputs:{toggle:"toggle"}}),gt})(),Ti=(()=>{class gt{get template(){return this._templateInput||this._templateQuery}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275dir=r.lG2({type:gt,selectors:[["ngx-datatable-footer"]],contentQueries:function(ie,Ze,Jt){if(1&ie&&r.Suo(Jt,ii,5,r.Rgc),2&ie){let gn;r.iGM(gn=r.CRH())&&(Ze._templateQuery=gn.first)}},inputs:{footerHeight:"footerHeight",totalMessage:"totalMessage",selectedMessage:"selectedMessage",pagerLeftArrowIcon:"pagerLeftArrowIcon",pagerRightArrowIcon:"pagerRightArrowIcon",pagerPreviousIcon:"pagerPreviousIcon",pagerNextIcon:"pagerNextIcon",_templateInput:["template","_templateInput"]}}),gt})();function Ci(gt){const Tn={left:[],center:[],right:[]};if(gt)for(const ie of gt)ie.frozenLeft?Tn.left.push(ie):ie.frozenRight?Tn.right.push(ie):Tn.center.push(ie);return Tn}function Ai(gt,Tn){return{left:Ko(gt.left),center:Ko(gt.center),right:Ko(gt.right),total:Math.floor(Ko(Tn))}}function Ko(gt,Tn){let ie=0;if(gt)for(const Ze of gt)ie+=parseFloat(Tn&&Ze[Tn]?Ze[Tn]:Ze.width);return ie}function dr(gt){const Tn=[],ie=Ci(gt);return Tn.push({type:"left",columns:ie.left}),Tn.push({type:"center",columns:ie.center}),Tn.push({type:"right",columns:ie.right}),Tn}class 
Ni{constructor(){this.treeArray=[]}clearCache(){this.treeArray=[]}initCache(Tn){const{rows:ie,rowHeight:Ze,detailRowHeight:Jt,externalVirtual:gn,rowCount:vi,rowIndexes:Bi,rowExpansions:Xi}=Tn,ws="function"==typeof Ze,ds="function"==typeof Jt;if(!ws&&isNaN(Ze))throw new Error(`Row Height cache initialization failed. Please ensure that 'rowHeight' is a\n valid number or function value: (${Ze}) when 'scrollbarV' is enabled.`);if(!ds&&isNaN(Jt))throw new Error(`Row Height cache initialization failed. Please ensure that 'detailRowHeight' is a\n valid number or function value: (${Jt}) when 'scrollbarV' is enabled.`);const qs=gn?vi:ie.length;this.treeArray=new Array(qs);for(let Js=0;Js<qs;++Js)this.treeArray[Js]=0;for(let Js=0;Js<qs;++Js){const Ks=ie[Js];let gs=Ze;ws&&(gs=Ze(Ks)),Ks&&Xi.has(Ks)&&(gs+=ds?Jt(Ks,Bi.get(Ks)):Jt),this.update(Js,gs)}}getRowIndex(Tn){return 0===Tn?0:this.calcRowIndex(Tn)}update(Tn,ie){const Ze=this.treeArray.length;for(Tn|=0;Tn<Ze;)this.treeArray[Tn]+=ie,Tn|=Tn+1}query(Tn){let ie=0;for(Tn|=0;Tn>=0;)ie+=this.treeArray[Tn],Tn=(Tn&Tn+1)-1;return ie}queryBetween(Tn,ie){return this.query(ie)-this.query(Tn-1)}calcRowIndex(Tn){if(!this.treeArray.length)return 0;let ie=-1;const Ze=this.treeArray.length;for(let gn=Math.pow(2,Ze.toString(2).length-1);0!==gn;gn>>=1){const vi=ie+gn;vi<Ze&&Tn>=this.treeArray[vi]&&(Tn-=this.treeArray[vi],ie=vi)}return ie+1}}const ti={},Vr=typeof document<"u"?document.createElement("div").style:void 0,ji=function(){const gt=typeof window<"u"?window.getComputedStyle(document.documentElement,""):void 0,Tn=typeof gt<"u"?Array.prototype.slice.call(gt).join("").match(/-(moz|webkit|ms)-/):null,ie=null!==Tn?Tn[1]:void 0,Ze=typeof ie<"u"?"WebKit|Moz|MS|O".match(new RegExp("("+ie+")","i"))[1]:void 0;return Ze?{dom:Ze,lowercase:ie,css:`-${ie}-`,js:ie[0].toUpperCase()+ie.substr(1)}:void 0}();function Vi(gt){const Tn=Rt(gt);return ti[Tn]||(void 0!==ji&&void 0!==Vr[ji.css+gt]?ti[Tn]=ji.css+gt:void 0!==Vr[gt]&&(ti[Tn]=gt)),ti[Tn]}const Po=typeof window<"u"?Vi("transform"):void 0,ko=typeof window<"u"?Vi("backfaceVisibility"):void 0,Ir=typeof window<"u"?!!Vi("transform"):void 0,ro=typeof window<"u"?!!Vi("perspective"):void 0,Vt=typeof window<"u"?window.navigator.userAgent:"Chrome",bn=/Safari\//.test(Vt)&&!/Chrome\//.test(Vt);function Bn(gt,Tn,ie){typeof Po<"u"&&Ir?!bn&&ro?(gt[Po]=`translate3d(${Tn}px, ${ie}px, 0)`,gt[ko]="hidden"):gt[Rt(Po)]=`translate(${Tn}px, ${ie}px)`:(gt.top=`${ie}px`,gt.left=`${Tn}px`)}let ci=(()=>{class gt{constructor(ie){this.cd=ie,this.selected=[],this.scroll=new r.vpe,this.page=new r.vpe,this.activate=new r.vpe,this.select=new r.vpe,this.detailToggle=new r.vpe,this.rowContextmenu=new r.vpe(!1),this.treeAction=new r.vpe,this.rowHeightsCache=new Ni,this.temp=[],this.offsetY=0,this.indexes={},this.rowIndexes=new WeakMap,this.rowExpansions=[],this.getDetailRowHeight=(Ze,Jt)=>{if(!this.rowDetail)return 0;const gn=this.rowDetail.rowHeight;return"function"==typeof gn?gn(Ze,Jt):gn},this.rowTrackingFn=(Ze,Jt)=>{const gn=this.getRowIndex(Jt);return this.trackByProp?Jt[this.trackByProp]:gn}}set pageSize(ie){this._pageSize=ie,this.recalcLayout()}get pageSize(){return this._pageSize}set rows(ie){this._rows=ie,this.recalcLayout()}get rows(){return this._rows}set columns(ie){this._columns=ie;const Ze=Ci(ie);this.columnGroupWidths=Ai(Ze,ie)}get columns(){return this._columns}set offset(ie){this._offset=ie,(!this.scrollbarV||this.scrollbarV&&!this.virtualization)&&this.recalcLayout()}get offset(){return this._offset}set rowCount(ie){this._rowCount=ie,this.recalcLayout()}get rowCount(){return this._rowCount}get bodyWidth(){return this.scrollbarH?this.innerWidth+"px":"100%"}set bodyHeight(ie){this._bodyHeight=this.scrollbarV?ie+"px":"auto",this.recalcLayout()}get bodyHeight(){return this._bodyHeight}get selectEnabled(){return!!this.selectionType}get
scrollHeight(){if(this.scrollbarV&&this.virtualization&&this.rowCount)return this.rowHeightsCache.query(this.rowCount-1)}ngOnInit(){this.rowDetail&&(this.listener=this.rowDetail.toggle.subscribe(({type:ie,value:Ze})=>{"row"===ie&&this.toggleRowExpansion(Ze),"all"===ie&&this.toggleAllRows(Ze),this.updateIndexes(),this.updateRows(),this.cd.markForCheck()})),this.groupHeader&&(this.listener=this.groupHeader.toggle.subscribe(({type:ie,value:Ze})=>{"group"===ie&&this.toggleRowExpansion(Ze),"all"===ie&&this.toggleAllRows(Ze),this.updateIndexes(),this.updateRows(),this.cd.markForCheck()}))}ngOnDestroy(){(this.rowDetail||this.groupHeader)&&this.listener.unsubscribe()}updateOffsetY(ie){this.scroller&&(this.scrollbarV&&this.virtualization&&ie?ie=this.rowHeightsCache.query(this.pageSize*ie-1):this.scrollbarV&&!this.virtualization&&(ie=0),this.scroller.setOffset(ie||0))}onBodyScroll(ie){const Ze=ie.scrollYPos,Jt=ie.scrollXPos;(this.offsetY!==Ze||this.offsetX!==Jt)&&this.scroll.emit({offsetY:Ze,offsetX:Jt}),this.offsetY=Ze,this.offsetX=Jt,this.updateIndexes(),this.updatePage(ie.direction),this.updateRows()}updatePage(ie){let Ze=this.indexes.first/this.pageSize;"up"===ie?Ze=Math.ceil(Ze):"down"===ie&&(Ze=Math.floor(Ze)),void 0!==ie&&!isNaN(Ze)&&this.page.emit({offset:Ze})}updateRows(){const{first:ie,last:Ze}=this.indexes;let Jt=ie,gn=0;const vi=[];if(this.groupedRows){let Bi=3;for(1===this.groupedRows.length&&(Bi=this.groupedRows[0].value.length);Jt<Ze&&Jt<this.groupedRows.length;){const Xi=this.groupedRows[Jt];this.rowIndexes.set(Xi,Jt),Xi.value&&Xi.value.forEach((ws,ds)=>{this.rowIndexes.set(ws,`${Jt}-${ds}`)}),vi[gn]=Xi,gn++,Jt++}}else for(;Jt<Ze&&Jt<this.rowCount;){const Xi=this.rows[Jt];Xi&&(this.rowIndexes.set(Xi,Jt),vi[gn]=Xi),gn++,Jt++}this.temp=vi}getRowHeight(ie){return"function"==typeof this.rowHeight?this.rowHeight(ie):this.rowHeight}getGroupHeight(ie){let Ze=0;if(ie.value)for(let Jt=0;Jt<ie.value.length;Jt++)Ze+=this.getRowAndDetailHeight(ie.value[Jt]);return Ze}getRowAndDetailHeight(ie){let Ze=this.getRowHeight(ie);return this.getRowExpanded(ie)&&(Ze+=this.getDetailRowHeight(ie)),Ze}getRowsStyles(ie){const Ze={};if(this.groupedRows&&(Ze.width=this.columnGroupWidths.total),this.scrollbarV&&this.virtualization){let Jt=0;if(this.groupedRows){const gn=ie[ie.length-1];Jt=gn?this.getRowIndex(gn):0}else Jt=this.getRowIndex(ie);Bn(Ze,0,this.rowHeightsCache.query(Jt-1))}return Ze}getBottomSummaryRowStyles(){if(!this.scrollbarV||!this.rows||!this.rows.length)return null;const ie={position:"absolute"};return Bn(ie,0,this.rowHeightsCache.query(this.rows.length-1)),ie}hideIndicator(){setTimeout(()=>this.loadingIndicator=!1,500)}updateIndexes(){let ie=0,Ze=0;if(this.scrollbarV)if(this.virtualization){const Jt=parseInt(this.bodyHeight,0);ie=this.rowHeightsCache.getRowIndex(this.offsetY),Ze=this.rowHeightsCache.getRowIndex(Jt+this.offsetY)+1}else ie=0,Ze=this.rowCount;else this.externalPaging||(ie=Math.max(this.offset*this.pageSize,0)),Ze=Math.min(ie+this.pageSize,this.rowCount);this.indexes={first:ie,last:Ze}}refreshRowHeightCache(){if(this.scrollbarV&&(!this.scrollbarV||this.virtualization)&&(this.rowHeightsCache.clearCache(),this.rows&&this.rows.length)){const ie=new Set;for(const Ze of this.rows)this.getRowExpanded(Ze)&&ie.add(Ze);this.rowHeightsCache.initCache({rows:this.rows,rowHeight:this.rowHeight,detailRowHeight:this.getDetailRowHeight,externalVirtual:this.scrollbarV&&this.externalPaging,rowCount:this.rowCount,rowIndexes:this.rowIndexes,rowExpansions:ie})}}getAdjustedViewPortIndex(){const ie=this.indexes.first;return this.scrollbarV&&this.virtualization&&this.rowHeightsCache.query(ie-1)<=this.offsetY?ie-1:ie}toggleRowExpansion(ie){const Ze=this.getAdjustedViewPortIndex(),Jt=this.getRowExpandedIdx(ie,this.rowExpansions),gn=Jt>-1;if(this.scrollbarV&&this.virtualization){const vi=this.getDetailRowHeight(ie)*(gn?-1:1),Bi=this.getRowIndex(ie);this.rowHeightsCache.update(Bi,vi)}gn?this.rowExpansions.splice(Jt,1):this.rowExpansions.push(ie),this.detailToggle.emit({rows:[ie],currentIndex:Ze})}toggleAllRows(ie){this.rowExpansions=[];const Ze=this.getAdjustedViewPortIndex();if(ie)for(const Jt of this.rows)this.rowExpansions.push(Jt);this.scrollbarV&&this.recalcLayout(),this.detailToggle.emit({rows:this.rows,currentIndex:Ze})}recalcLayout(){this.refreshRowHeightCache(),this.updateIndexes(),this.updateRows()}columnTrackingFn(ie,Ze){return Ze.$$id}stylesByGroup(ie){const Ze=this.columnGroupWidths,Jt=this.offsetX,gn={width:`${Ze[ie]}px`};if("left"===ie)Bn(gn,Jt,0);else if("right"===ie){const vi=parseInt(this.innerWidth+"",0);Bn(gn,-1*(Ze.total-vi-Jt),0)}return
gn}getRowExpanded(ie){if(0===this.rowExpansions.length&&this.groupExpansionDefault)for(const Ze of this.groupedRows)this.rowExpansions.push(Ze);return this.getRowExpandedIdx(ie,this.rowExpansions)>-1}getRowExpandedIdx(ie,Ze){if(!Ze||!Ze.length)return-1;const Jt=this.rowIdentity(ie);return Ze.findIndex(gn=>this.rowIdentity(gn)===Jt)}getRowIndex(ie){return this.rowIndexes.get(ie)||0}onTreeAction(ie){this.treeAction.emit({row:ie})}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.sBO))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-body"]],viewQuery:function(ie,Ze){if(1&ie&&r.Gf(qr,5),2&ie){let Jt;r.iGM(Jt=r.CRH())&&(Ze.scroller=Jt.first)}},hostAttrs:[1,"datatable-body"],hostVars:4,hostBindings:function(ie,Ze){2&ie&&r.Udp("width",Ze.bodyWidth)("height",Ze.bodyHeight)},inputs:{selected:"selected",pageSize:"pageSize",rows:"rows",columns:"columns",offset:"offset",rowCount:"rowCount",bodyHeight:"bodyHeight",offsetX:"offsetX",loadingIndicator:"loadingIndicator",scrollbarV:"scrollbarV",scrollbarH:"scrollbarH",externalPaging:"externalPaging",rowHeight:"rowHeight",emptyMessage:"emptyMessage",selectionType:"selectionType",rowIdentity:"rowIdentity",rowDetail:"rowDetail",groupHeader:"groupHeader",selectCheck:"selectCheck",displayCheck:"displayCheck",trackByProp:"trackByProp",rowClass:"rowClass",groupedRows:"groupedRows",groupExpansionDefault:"groupExpansionDefault",innerWidth:"innerWidth",groupRowsBy:"groupRowsBy",virtualization:"virtualization",summaryRow:"summaryRow",summaryPosition:"summaryPosition",summaryHeight:"summaryHeight"},outputs:{scroll:"scroll",page:"page",activate:"activate",select:"select",detailToggle:"detailToggle",rowContextmenu:"rowContextmenu",treeAction:"treeAction"},decls:5,vars:9,consts:[[4,"ngIf"],[3,"selected","rows","selectCheck","selectEnabled","selectionType","rowIdentity","select","activate"],["selector",""],[3,"scrollbarV","scrollbarH","scrollHeight","scrollWidth","scroll",4,"ngIf"],["class","empty-row",3,"innerHTML",4,"ngIf"],[3,"scrollbarV","scrollbarH","scrollHeight","scrollWidth","scroll"],[3,"rowHeight","offsetX","innerWidth","rows","columns",4,"ngIf"],[3,"groupedRows","innerWidth","ngStyle","rowDetail","groupHeader","offsetX","detailRowHeight","row","expanded","rowIndex","rowContextmenu",4,"ngFor","ngForOf","ngForTrackBy"],[3,"ngStyle","rowHeight","offsetX","innerWidth","rows","columns",4,"ngIf"],[3,"rowHeight","offsetX","innerWidth","rows","columns"],[3,"groupedRows","innerWidth","ngStyle","rowDetail","groupHeader","offsetX","detailRowHeight","row","expanded","rowIndex","rowContextmenu"],["tabindex","-1",3,"isSelected","innerWidth","offsetX","columns","rowHeight","row","rowIndex","expanded","rowClass","displayCheck","treeStatus","treeAction","activate",4,"ngIf","ngIfElse"],["groupedRowsTemplate",""],["tabindex","-1",3,"isSelected","innerWidth","offsetX","columns","rowHeight","row","rowIndex","expanded","rowClass","displayCheck","treeStatus","treeAction","activate"],["tabindex","-1",3,"isSelected","innerWidth","offsetX","columns","rowHeight","row","group","rowIndex","expanded","rowClass","activate",4,"ngFor","ngForOf","ngForTrackBy"],["tabindex","-1",3,"isSelected","innerWidth","offsetX","columns","rowHeight","row","group","rowIndex","expanded","rowClass","activate"],[3,"ngStyle","rowHeight","offsetX","innerWidth","rows","columns"],[1,"empty-row",3,"innerHTML"]],template:function(ie,Ze){1&ie&&(r.YNc(0,M,1,0,"datatable-progress",0),r.TgZ(1,"datatable-selection",1,2),r.NdJ("select",function(gn){return Ze.select.emit(gn)})("activate",function(gn){return 
Ze.activate.emit(gn)}),r.YNc(3,F,4,8,"datatable-scroller",3),r.YNc(4,X,1,1,"div",4),r.qZA()),2&ie&&(r.Q6J("ngIf",Ze.loadingIndicator),r.xp6(1),r.Q6J("selected",Ze.selected)("rows",Ze.rows)("selectCheck",Ze.selectCheck)("selectEnabled",Ze.selectEnabled)("selectionType",Ze.selectionType)("rowIdentity",Ze.rowIdentity),r.xp6(2),r.Q6J("ngIf",null==Ze.rows?null:Ze.rows.length),r.xp6(1),r.Q6J("ngIf",!(null!=Ze.rows&&Ze.rows.length||Ze.loadingIndicator)))},dependencies:function(){return[a.sg,a.O5,a.PC,qr,Ha,hs,$s,Xo,ns]},encapsulation:2,changeDetection:0}),gt})(),_o=(()=>{class gt{constructor(ie){this.cd=ie,this.sort=new r.vpe,this.reorder=new r.vpe,this.resize=new r.vpe,this.select=new r.vpe,this.columnContextmenu=new r.vpe(!1),this._columnGroupWidths={total:100},this._styleByGroup={left:{},center:{},right:{}},this.destroyed=!1}set innerWidth(ie){this._innerWidth=ie,setTimeout(()=>{if(this._columns){const Ze=Ci(this._columns);this._columnGroupWidths=Ai(Ze,this._columns),this.setStylesByGroup()}})}get innerWidth(){return this._innerWidth}set headerHeight(ie){this._headerHeight="auto"!==ie?`${ie}px`:ie}get headerHeight(){return this._headerHeight}set columns(ie){this._columns=ie;const Ze=Ci(ie);this._columnsByPin=dr(ie),setTimeout(()=>{this._columnGroupWidths=Ai(Ze,ie),this.setStylesByGroup()})}get columns(){return this._columns}set offsetX(ie){this._offsetX=ie,this.setStylesByGroup()}get offsetX(){return this._offsetX}ngOnDestroy(){this.destroyed=!0}onLongPressStart({event:ie,model:Ze}){Ze.dragging=!0,this.dragEventTarget=ie}onLongPressEnd({event:ie,model:Ze}){this.dragEventTarget=ie,setTimeout(()=>{const Jt=this._columns.find(gn=>gn.$$id===Ze.$$id);Jt&&(Jt.dragging=!1)},5)}get headerWidth(){return this.scrollbarH?this.innerWidth+"px":"100%"}trackByGroups(ie,Ze){return Ze.type}columnTrackingFn(ie,Ze){return Ze.$$id}onColumnResized(ie,Ze){ie<=Ze.minWidth?ie=Ze.minWidth:ie>=Ze.maxWidth&&(ie=Ze.maxWidth),this.resize.emit({column:Ze,prevValue:Ze.width,newValue:ie})}onColumnReordered({prevIndex:ie,newIndex:Ze,model:Jt}){const gn=this.getColumn(Ze);gn.isTarget=!1,gn.targetMarkerContext=void 0,this.reorder.emit({column:Jt,prevValue:ie,newValue:Ze})}onTargetChanged({prevIndex:ie,newIndex:Ze,initialIndex:Jt}){if(ie||0===ie){const gn=this.getColumn(ie);gn.isTarget=!1,gn.targetMarkerContext=void 0}if(Ze||0===Ze){const gn=this.getColumn(Ze);gn.isTarget=!0,Jt!==Ze&&(gn.targetMarkerContext={class:"targetMarker ".concat(Jt>Ze?"dragFromRight":"dragFromLeft")})}}getColumn(ie){const Ze=this._columnsByPin[0].columns.length;if(ie<Ze)return this._columnsByPin[0].columns[ie];const Jt=this._columnsByPin[1].columns.length;return ie<Ze+Jt?this._columnsByPin[1].columns[ie-Ze]:this._columnsByPin[2].columns[ie-Ze-Jt]}onSort({column:ie,prevValue:Ze,newValue:Jt}){if(ie.dragging)return;const gn=this.calcNewSorts(ie,Ze,Jt);this.sort.emit({sorts:gn,column:ie,prevValue:Ze,newValue:Jt})}calcNewSorts(ie,Ze,Jt){let gn=0;this.sorts||(this.sorts=[]);const vi=this.sorts.map((Bi,Xi)=>((Bi=Object.assign({},Bi)).prop===ie.prop&&(gn=Xi),Bi));return void 0===Jt?vi.splice(gn,1):Ze?vi[gn].dir=Jt:(this.sortType===Zn.single&&vi.splice(0,this.sorts.length),vi.push({dir:Jt,prop:ie.prop})),vi}setStylesByGroup(){this._styleByGroup.left=this.calcStylesByGroup("left"),this._styleByGroup.center=this.calcStylesByGroup("center"),this._styleByGroup.right=this.calcStylesByGroup("right"),this.destroyed||this.cd.detectChanges()}calcStylesByGroup(ie){const Ze=this._columnGroupWidths,gn={width:`${Ze[ie]}px`};return"center"===ie?Bn(gn,-1*this.offsetX,0):"right"===ie&&Bn(gn,-1*(Ze.total-this.innerWidth),0),gn}}return gt.\u0275fac=function(ie){return
new(ie||gt)(r.Y36(r.sBO))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-header"]],hostAttrs:[1,"datatable-header"],hostVars:4,hostBindings:function(ie,Ze){2&ie&&r.Udp("height",Ze.headerHeight)("width",Ze.headerWidth)},inputs:{innerWidth:"innerWidth",headerHeight:"headerHeight",columns:"columns",offsetX:"offsetX",sorts:"sorts",sortAscendingIcon:"sortAscendingIcon",sortDescendingIcon:"sortDescendingIcon",sortUnsetIcon:"sortUnsetIcon",scrollbarH:"scrollbarH",dealsWithGroup:"dealsWithGroup",targetMarkerTemplate:"targetMarkerTemplate",sortType:"sortType",allRowsSelected:"allRowsSelected",selectionType:"selectionType",reorderable:"reorderable"},outputs:{sort:"sort",reorder:"reorder",resize:"resize",select:"select",columnContextmenu:"columnContextmenu"},decls:2,vars:4,consts:[["orderable","",1,"datatable-header-inner",3,"reorder","targetChanged"],[3,"class","ngStyle",4,"ngFor","ngForOf","ngForTrackBy"],[3,"ngStyle"],["resizeable","","long-press","","draggable","",3,"resizeEnabled","pressModel","pressEnabled","dragX","dragY","dragModel","dragEventTarget","headerHeight","isTarget","targetMarkerTemplate","targetMarkerContext","column","sortType","sorts","selectionType","sortAscendingIcon","sortDescendingIcon","sortUnsetIcon","allRowsSelected","resize","longPressStart","longPressEnd","sort","select","columnContextmenu",4,"ngFor","ngForOf","ngForTrackBy"],["resizeable","","long-press","","draggable","",3,"resizeEnabled","pressModel","pressEnabled","dragX","dragY","dragModel","dragEventTarget","headerHeight","isTarget","targetMarkerTemplate","targetMarkerContext","column","sortType","sorts","selectionType","sortAscendingIcon","sortDescendingIcon","sortUnsetIcon","allRowsSelected","resize","longPressStart","longPressEnd","sort","select","columnContextmenu"]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"div",0),r.NdJ("reorder",function(gn){return Ze.onColumnReordered(gn)})("targetChanged",function(gn){return Ze.onTargetChanged(gn)}),r.YNc(1,V,2,5,"div",1),r.qZA()),2&ie&&(r.Udp("width",Ze._columnGroupWidths.total,"px"),r.xp6(1),r.Q6J("ngForOf",Ze._columnsByPin)("ngForTrackBy",Ze.trackByGroups))},dependencies:function(){return[a.sg,a.PC,pr,Eo,po,$i,$a]},encapsulation:2,changeDetection:0}),gt})();function go(gt,Tn,ie){ie=ie||{};let Ze,Jt,gn,vi=null,Bi=0;function Xi(){Bi=!1===ie.leading?0:+new Date,vi=null,gn=gt.apply(Ze,Jt)}return function(){const ws=+new Date;!Bi&&!1===ie.leading&&(Bi=ws);const ds=Tn-(ws-Bi);return Ze=this,Jt=arguments,ds<=0?(clearTimeout(vi),vi=null,Bi=ws,gn=gt.apply(Ze,Jt)):!vi&&!1!==ie.trailing&&(vi=setTimeout(Xi,ds)),gn}}function es(gt,Tn){return function(Ze,Jt,gn){return{configurable:!0,enumerable:gn.enumerable,get:function(){return Object.defineProperty(this,Jt,{configurable:!0,enumerable:gn.enumerable,value:go(gn.value,gt,Tn)}),this[Jt]}}}}function Is(gt,Tn){for(const ie of Tn){const Ze=gt.indexOf(ie);gt.splice(Ze,1)}}function la(gt,Tn=300){let ie=0;for(const Ze of gt)ie+=Ze.width||Tn;return ie}var Ro=(()=>{return(gt=Ro||(Ro={})).asc="asc",gt.desc="desc",Ro;var gt})();function gl(gt,Tn){if((null===gt||typeof gt>"u")&&(gt=0),(null===Tn||typeof Tn>"u")&&(Tn=0),gt instanceof Date&&Tn instanceof Date){if(gt<Tn)return-1;if(gt>Tn)return 1}else if(isNaN(parseFloat(gt))||!isFinite(gt)||isNaN(parseFloat(Tn))||!isFinite(Tn)){if(gt=String(gt),Tn=String(Tn),gt.toLowerCase()<Tn.toLowerCase())return-1;if(gt.toLowerCase()>Tn.toLowerCase())return 1}else{if(parseFloat(gt)<parseFloat(Tn))return-1;if(parseFloat(gt)>parseFloat(Tn))return 1}return 0}let da=(()=>{class 
gt{constructor(ie,Ze,Jt,gn,vi,Bi,Xi){this.scrollbarHelper=ie,this.dimensionsHelper=Ze,this.cd=Jt,this.columnChangesService=Bi,this.configuration=Xi,this.selected=[],this.scrollbarV=!1,this.scrollbarH=!1,this.rowHeight=30,this.columnMode=Pr.standard,this.headerHeight=30,this.footerHeight=0,this.externalPaging=!1,this.externalSorting=!1,this.loadingIndicator=!1,this.reorderable=!0,this.swapColumns=!0,this.sortType=Zn.single,this.sorts=[],this.cssClasses={sortAscending:"datatable-icon-up",sortDescending:"datatable-icon-down",sortUnset:"datatable-icon-sort-unset",pagerLeftArrow:"datatable-icon-left",pagerRightArrow:"datatable-icon-right",pagerPrevious:"datatable-icon-prev",pagerNext:"datatable-icon-skip"},this.messages={emptyMessage:"No data to display",totalMessage:"total",selectedMessage:"selected"},this.groupExpansionDefault=!1,this.selectAllRowsOnPage=!1,this.virtualization=!0,this.summaryRow=!1,this.summaryHeight=30,this.summaryPosition="top",this.scroll=new r.vpe,this.activate=new r.vpe,this.select=new r.vpe,this.sort=new r.vpe,this.page=new r.vpe,this.reorder=new r.vpe,this.resize=new r.vpe,this.tableContextmenu=new r.vpe(!1),this.treeAction=new r.vpe,this.rowCount=0,this._offsetX=new e.X(0),this._count=0,this._offset=0,this._subscriptions=[],this.rowIdentity=ws=>this._groupRowsBy?ws.key:ws,this.element=gn.nativeElement,this.rowDiffer=vi.find({}).create(),this.configuration&&this.configuration.messages&&(this.messages=Object.assign({},this.configuration.messages))}set rows(ie){this._rows=ie,ie&&(this._internalRows=[...ie]),this.externalSorting||this.sortInternalRows(),this._internalRows=an(this._internalRows,ze(this.treeFromRelation),ze(this.treeToRelation)),this.recalculate(),this._rows&&this._groupRowsBy&&(this.groupedRows=this.groupArrayBy(this._rows,this._groupRowsBy)),this.cd.markForCheck()}get rows(){return this._rows}set groupRowsBy(ie){ie&&(this._groupRowsBy=ie,this._rows&&this._groupRowsBy&&(this.groupedRows=this.groupArrayBy(this._rows,this._groupRowsBy)))}get groupRowsBy(){return this._groupRowsBy}set columns(ie){ie&&(this._internalColumns=[...ie],gr(this._internalColumns),this.recalculateColumns()),this._columns=ie}get columns(){return this._columns}set limit(ie){this._limit=ie,this.recalculate()}get limit(){return this._limit}set count(ie){this._count=ie,this.recalculate()}get count(){return this._count}set offset(ie){this._offset=ie}get offset(){return Math.max(Math.min(this._offset,Math.ceil(this.rowCount/this.pageSize)-1),0)}get isFixedHeader(){const ie=this.headerHeight;return"string"!=typeof ie||"auto"!==ie}get isFixedRow(){return"auto"!==this.rowHeight}get isVertScroll(){return this.scrollbarV}get isVirtualized(){return this.virtualization}get isHorScroll(){return this.scrollbarH}get isSelectable(){return void 0!==this.selectionType}get isCheckboxSelection(){return this.selectionType===tr.checkbox}get isCellSelection(){return this.selectionType===tr.cell}get isSingleSelection(){return this.selectionType===tr.single}get isMultiSelection(){return this.selectionType===tr.multi}get isMultiClickSelection(){return this.selectionType===tr.multiClick}set columnTemplates(ie){this._columnTemplates=ie,this.translateColumns(ie)}get columnTemplates(){return this._columnTemplates}get allRowsSelected(){let ie=this.rows&&this.selected&&this.selected.length===this.rows.length;if(this.bodyComponent&&this.selectAllRowsOnPage){const Ze=this.bodyComponent.indexes;ie=this.selected.length===Ze.last-Ze.first}return 
this.selected&&this.rows&&0!==this.rows.length&&ie}ngOnInit(){this.recalculate()}ngAfterViewInit(){this.externalSorting||this.sortInternalRows(),!(typeof requestAnimationFrame>"u")&&requestAnimationFrame(()=>{this.recalculate(),this.externalPaging&&this.scrollbarV&&this.page.emit({count:this.count,pageSize:this.pageSize,limit:this.limit,offset:0})})}ngAfterContentInit(){this.columnTemplates.changes.subscribe(ie=>this.translateColumns(ie)),this.listenForColumnInputChanges()}translateColumns(ie){if(ie){const Ze=ie.toArray();Ze.length&&(this._internalColumns=function _r(gt){const Tn=[];for(const ie of gt){const Ze={},Jt=Object.getOwnPropertyNames(ie);for(const gn of Jt)Ze[gn]=ie[gn];ie.headerTemplate&&(Ze.headerTemplate=ie.headerTemplate),ie.cellTemplate&&(Ze.cellTemplate=ie.cellTemplate),ie.summaryFunc&&(Ze.summaryFunc=ie.summaryFunc),ie.summaryTemplate&&(Ze.summaryTemplate=ie.summaryTemplate),Tn.push(Ze)}return Tn}(Ze),gr(this._internalColumns),this.recalculateColumns(),this.sortInternalRows(),this.cd.markForCheck())}}groupArrayBy(ie,Ze){const Jt=new Map;return ie.forEach(Bi=>{const Xi=Bi[Ze];Jt.has(Xi)?Jt.get(Xi).push(Bi):Jt.set(Xi,[Bi])}),Array.from(Jt,Bi=>((Bi,Xi)=>({key:Bi,value:Xi}))(Bi[0],Bi[1]))}ngDoCheck(){this.rowDiffer.diff(this.rows)&&(this.externalSorting?this._internalRows=[...this.rows]:this.sortInternalRows(),this._internalRows=an(this._internalRows,ze(this.treeFromRelation),ze(this.treeToRelation)),this.recalculatePages(),this.cd.markForCheck())}recalculate(){this.recalculateDims(),this.recalculateColumns(),this.cd.markForCheck()}onWindowResize(){this.recalculate()}recalculateColumns(ie=this._internalColumns,Ze=-1,Jt=this.scrollbarH){if(!ie)return;let gn=this._innerWidth;return this.scrollbarV&&(gn-=this.scrollbarHelper.width),this.columnMode===Pr.force?function gs(gt,Tn,ie,Ze,Jt=300){const gn=gt.slice(ie+1,gt.length).filter(Js=>!1!==Js.canAutoResize);for(const Js of gn)Js.$$oldWidth||(Js.$$oldWidth=Js.width);let vi=0,Bi=!1,Xi=la(gt,Jt),ws=Tn-Xi;const ds=[];do{vi=ws/gn.length,Bi=Xi>=Tn;for(const Js of gn){if(Bi&&Ze)Js.width=Js.$$oldWidth||Js.width||Jt;else{const Ll=(Js.width||Jt)+vi;Js.minWidth&&Ll<Js.minWidth?(Js.width=Js.minWidth,ds.push(Js)):Js.maxWidth&&Ll>Js.maxWidth?(Js.width=Js.maxWidth,ds.push(Js)):Js.width=Ll}Js.width=Math.max(0,Js.width)}Xi=la(gt),ws=Tn-Xi,Is(gn,ds)}while(ws>1&&0!==gn.length)}(ie,gn,Ze,Jt):this.columnMode===Pr.flex&&function jo(gt,Tn){const ie=function _s(gt,Tn){let ie=0;for(const Ze of gt)ie+=Tn&&Ze[Tn]?Ze[Tn]:Ze.width;return ie}(gt),Ze=function ts(gt){let Tn=0;for(const ie of gt)Tn+=ie.flexGrow||0;return Tn}(gt),Jt=Ci(gt);ie!==Tn&&function ss(gt,Tn,ie){for(const gn in gt)for(const vi of gt[gn])vi.canAutoResize?vi.width=0:(Tn-=vi.width,ie-=vi.flexGrow?vi.flexGrow:0);const Ze={};let Jt=Tn;do{const gn=Jt/ie;Jt=0;for(const vi in gt)for(const Bi of gt[vi])if(Bi.canAutoResize&&!Ze[Bi.prop]){const Xi=Bi.width+Bi.flexGrow*gn;void 0!==Bi.minWidth&&Xi<Bi.minWidth?(Jt+=Xi-Bi.minWidth,Bi.width=Bi.minWidth,Ze[Bi.prop]=!0):Bi.width=Xi}}while(0!==Jt)}(Jt,Tn,Ze)}(ie,gn),ie}recalculateDims(){const ie=this.dimensionsHelper.getDimensions(this.element);if(this._innerWidth=Math.floor(ie.width),this.scrollbarV){let Ze=ie.height;this.headerHeight&&(Ze-=this.headerHeight),this.footerHeight&&(Ze-=this.footerHeight),this.bodyHeight=Ze}this.recalculatePages()}recalculatePages(){this.pageSize=this.calcPageSize(),this.rowCount=this.calcRowCount()}onBodyPage({offset:ie}){this.externalPaging&&!this.virtualization||(this.offset=ie,this.page.emit({count:this.count,pageSize:this.pageSize,limit:this.limit,offset:this.offset}))}onBodyScroll(ie){this._offsetX.next(ie.offsetX),this.scroll.emit(ie),this.cd.detectChanges()}onFooterPage(ie){this.offset=ie.page-1,this.bodyComponent.updateOffsetY(this.offset),this.page.emit({count:this.count,pageSize:this.pageSize,limit:this.limit,offset:this.offset})}calcPageSize(ie=this.rows){return this.scrollbarV&&this.virtualization?Math.max(Math.ceil(this.bodyHeight/this.rowHeight),0):void 0!==this.limit?this.limit:ie?ie.length:0}calcRowCount(ie=this.rows){return this.externalPaging?this.count:ie?this.groupedRows?this.groupedRows.length:null!=this.treeFromRelation&&null!=this.treeToRelation?this._internalRows.length:ie.length:0}onColumnResize({column:ie,newValue:Ze}){if(void 0===ie)return;let Jt;const gn=this._internalColumns.map((vi,Bi)=>((vi=Object.assign({},vi)).$$id===ie.$$id&&(Jt=Bi,vi.width=Ze,vi.$$oldWidth=Ze),vi));this.recalculateColumns(gn,Jt),this._internalColumns=gn,this.resize.emit({column:ie,newValue:Ze})}onColumnReorder({column:ie,newValue:Ze,prevValue:Jt}){const gn=this._internalColumns.map(vi=>Object.assign({},vi));if(this.swapColumns){const vi=gn[Ze];gn[Ze]=ie,gn[Jt]=vi}else if(Ze>Jt){const vi=gn[Jt];for(let 
Bi=Jt;Bi<Ze;Bi++)gn[Bi]=gn[Bi+1];gn[Ze]=vi}else{const vi=gn[Jt];for(let Bi=Jt;Bi>Ze;Bi--)gn[Bi]=gn[Bi-1];gn[Ze]=vi}this._internalColumns=gn,this.reorder.emit({column:ie,newValue:Ze,prevValue:Jt})}onColumnSort(ie){this.selectAllRowsOnPage&&(this.selected=[],this.select.emit({selected:this.selected})),this.sorts=ie.sorts,!1===this.externalSorting&&this.sortInternalRows(),this._internalRows=an(this._internalRows,ze(this.treeFromRelation),ze(this.treeToRelation)),this.offset=0,this.bodyComponent.updateOffsetY(this.offset),this.sort.emit(ie)}onHeaderSelect(ie){if(this.bodyComponent&&this.selectAllRowsOnPage){const Ze=this.bodyComponent.indexes.first,Jt=this.bodyComponent.indexes.last,gn=this.selected.length===Jt-Ze;this.selected=[],gn||this.selected.push(...this._internalRows.slice(Ze,Jt))}else{const Ze=this.selected.length===this.rows.length;this.selected=[],Ze||this.selected.push(...this.rows)}this.select.emit({selected:this.selected})}onBodySelect(ie){this.select.emit(ie)}onTreeAction(ie){const Ze=ie.row,Jt=this._rows.findIndex(gn=>gn[this.treeToRelation]===ie.row[this.treeToRelation]);this.treeAction.emit({row:Ze,rowIndex:Jt})}ngOnDestroy(){this._subscriptions.forEach(ie=>ie.unsubscribe())}listenForColumnInputChanges(){this._subscriptions.push(this.columnChangesService.columnInputChanges$.subscribe(()=>{this.columnTemplates&&this.columnTemplates.notifyOnChanges()}))}sortInternalRows(){this._internalRows=function qa(gt,Tn,ie){if(!gt)return[];if(!ie||!ie.length||!Tn)return[...gt];const Ze=new Map;gt.forEach((Bi,Xi)=>Ze.set(Bi,Xi));const Jt=[...gt],gn=Tn.reduce((Bi,Xi)=>(Xi.comparator&&"function"==typeof Xi.comparator&&(Bi[Xi.prop]=Xi.comparator),Bi),{}),vi=ie.map(Bi=>{const Xi=Bi.prop;return{prop:Xi,dir:Bi.dir,valueGetter:jt(Xi),compareFn:gn[Xi]||gl}});return Jt.sort(function(Bi,Xi){for(const ws of vi){const{prop:ds,valueGetter:qs}=ws,Js=qs(Bi,ds),Ll=qs(Xi,ds),vl=ws.dir!==Ro.desc?ws.compareFn(Js,Ll,Bi,Xi,ws.dir):-ws.compareFn(Js,Ll,Bi,Xi,ws.dir);if(0!==vl)return vl}return Ze.has(Bi)&&Ze.has(Xi)?Ze.get(Bi)div{display:flex}.ngx-datatable .datatable-footer{display:block;overflow:auto;width:100%}.ngx-datatable .datatable-footer .datatable-footer-inner{align-items:center;display:flex;width:100%}.ngx-datatable .datatable-footer .selected-count .page-count{flex:1 1 40%}.ngx-datatable .datatable-footer .selected-count .datatable-pager{flex:1 1 60%}.ngx-datatable .datatable-footer .page-count{flex:1 1 20%}.ngx-datatable .datatable-footer .datatable-pager{flex:1 1 80%;text-align:right}.ngx-datatable .datatable-footer .datatable-pager .pager,.ngx-datatable .datatable-footer .datatable-pager .pager li{display:inline-block;list-style:none;margin:0;padding:0}.ngx-datatable .datatable-footer .datatable-pager .pager li,.ngx-datatable .datatable-footer .datatable-pager .pager li a{outline:none}.ngx-datatable .datatable-footer .datatable-pager .pager li a{cursor:pointer;display:inline-block}.ngx-datatable .datatable-footer .datatable-pager .pager li.disabled a{cursor:not-allowed}"],encapsulation:2,changeDetection:0}),(0,m.gn)([es(5)],gt.prototype,"onWindowResize",null),gt})(),$a=(()=>{class gt{constructor(ie){this.cd=ie,this.sort=new r.vpe,this.select=new r.vpe,this.columnContextmenu=new r.vpe(!1),this.sortFn=this.onSort.bind(this),this.selectFn=this.select.emit.bind(this.select),this.cellContext={column:this.column,sortDir:this.sortDir,sortFn:this.sortFn,allRowsSelected:this.allRowsSelected,selectFn:this.selectFn}}set allRowsSelected(ie){this._allRowsSelected=ie,this.cellContext.allRowsSelected=ie}get allRowsSelected(){return this._allRowsSelected}set 
column(ie){this._column=ie,this.cellContext.column=ie,this.cd.markForCheck()}get column(){return this._column}set sorts(ie){this._sorts=ie,this.sortDir=this.calcSortDir(ie),this.cellContext.sortDir=this.sortDir,this.sortClass=this.calcSortClass(this.sortDir),this.cd.markForCheck()}get sorts(){return this._sorts}get columnCssClasses(){let ie="datatable-header-cell";if(this.column.sortable&&(ie+=" sortable"),this.column.resizeable&&(ie+=" resizeable"),this.column.headerClass)if("string"==typeof this.column.headerClass)ie+=" "+this.column.headerClass;else if("function"==typeof this.column.headerClass){const Jt=this.column.headerClass({column:this.column});if("string"==typeof Jt)ie+=Jt;else if("object"==typeof Jt){const gn=Object.keys(Jt);for(const vi of gn)!0===Jt[vi]&&(ie+=` ${vi}`)}}const Ze=this.sortDir;return Ze&&(ie+=` sort-active sort-${Ze}`),ie}get name(){return void 0===this.column.headerTemplate?this.column.name:void 0}get minWidth(){return this.column.minWidth}get maxWidth(){return this.column.maxWidth}get width(){return this.column.width}get isCheckboxable(){return this.column.checkboxable&&this.column.headerCheckboxable&&this.selectionType===tr.checkbox}onContextmenu(ie){this.columnContextmenu.emit({event:ie,column:this.column})}ngOnInit(){this.sortClass=this.calcSortClass(this.sortDir)}calcSortDir(ie){if(ie&&this.column){const Ze=ie.find(Jt=>Jt.prop===this.column.prop);if(Ze)return Ze.dir}}onSort(){if(!this.column.sortable)return;const ie=function jl(gt,Tn){return gt===Zn.single?Tn===Ro.asc?Ro.desc:Ro.asc:Tn?Tn===Ro.asc?Ro.desc:void 0:Ro.asc}(this.sortType,this.sortDir);this.sort.emit({column:this.column,prevValue:this.sortDir,newValue:ie})}calcSortClass(ie){if(this.cellContext.column.sortable)return ie===Ro.asc?`sort-btn sort-asc ${this.sortAscendingIcon}`:ie===Ro.desc?`sort-btn sort-desc ${this.sortDescendingIcon}`:`sort-btn ${this.sortUnsetIcon}`}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.sBO))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-header-cell"]],hostAttrs:[1,"datatable-header-cell"],hostVars:11,hostBindings:function(ie,Ze){1&ie&&r.NdJ("contextmenu",function(gn){return Ze.onContextmenu(gn)}),2&ie&&(r.uIk("title",Ze.name),r.Tol(Ze.columnCssClasses),r.Udp("min-width",Ze.minWidth,"px")("max-width",Ze.maxWidth,"px")("width",Ze.width,"px")("height",Ze.headerHeight,"px"))},inputs:{allRowsSelected:"allRowsSelected",column:"column",sorts:"sorts",sortType:"sortType",sortAscendingIcon:"sortAscendingIcon",sortDescendingIcon:"sortDescendingIcon",sortUnsetIcon:"sortUnsetIcon",isTarget:"isTarget",targetMarkerTemplate:"targetMarkerTemplate",targetMarkerContext:"targetMarkerContext",selectionType:"selectionType",headerHeight:"headerHeight"},outputs:{sort:"sort",select:"select",columnContextmenu:"columnContextmenu"},decls:6,vars:6,consts:[[1,"datatable-header-cell-template-wrap"],[4,"ngIf"],["class","datatable-checkbox",4,"ngIf"],["class","datatable-header-cell-wrapper",4,"ngIf"],[3,"click"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[1,"datatable-checkbox"],["type","checkbox",3,"checked","change"],[1,"datatable-header-cell-wrapper"],[1,"datatable-header-cell-label","draggable",3,"innerHTML","click"]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"div",0),r.YNc(1,Te,1,2,null,1),r.YNc(2,$e,2,1,"label",2),r.YNc(3,ge,2,1,"span",3),r.YNc(4,ot,1,2,null,1),r.TgZ(5,"span",4),r.NdJ("click",function(){return 
Ze.onSort()}),r.qZA()()),2&ie&&(r.xp6(1),r.Q6J("ngIf",Ze.isTarget),r.xp6(1),r.Q6J("ngIf",Ze.isCheckboxable),r.xp6(1),r.Q6J("ngIf",!Ze.column.headerTemplate),r.xp6(1),r.Q6J("ngIf",Ze.column.headerTemplate),r.xp6(1),r.Tol(Ze.sortClass))},dependencies:[a.O5,a.tP],encapsulation:2,changeDetection:0}),gt})(),Rl=(()=>{class gt{constructor(){this.selectedCount=0,this.page=new r.vpe}get isVisible(){return this.rowCount/this.pageSize>1}get curPage(){return this.offset+1}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-footer"]],hostAttrs:[1,"datatable-footer"],inputs:{selectedCount:"selectedCount",footerHeight:"footerHeight",rowCount:"rowCount",pageSize:"pageSize",offset:"offset",pagerLeftArrowIcon:"pagerLeftArrowIcon",pagerRightArrowIcon:"pagerRightArrowIcon",pagerPreviousIcon:"pagerPreviousIcon",pagerNextIcon:"pagerNextIcon",totalMessage:"totalMessage",footerTemplate:"footerTemplate",selectedMessage:"selectedMessage"},outputs:{page:"page"},decls:4,vars:8,consts:[[1,"datatable-footer-inner",3,"ngClass"],[4,"ngIf"],["class","page-count",4,"ngIf"],[3,"pagerLeftArrowIcon","pagerRightArrowIcon","pagerPreviousIcon","pagerNextIcon","page","size","count","hidden","change",4,"ngIf"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[1,"page-count"],[3,"pagerLeftArrowIcon","pagerRightArrowIcon","pagerPreviousIcon","pagerNextIcon","page","size","count","hidden","change"]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"div",0),r.YNc(1,He,1,8,null,1),r.YNc(2,Le,3,3,"div",2),r.YNc(3,Pt,1,8,"datatable-pager",3),r.qZA()),2&ie&&(r.Udp("height",Ze.footerHeight,"px"),r.Q6J("ngClass",r.VKq(6,it,Ze.selectedMessage)),r.xp6(1),r.Q6J("ngIf",Ze.footerTemplate),r.xp6(1),r.Q6J("ngIf",!Ze.footerTemplate),r.xp6(1),r.Q6J("ngIf",!Ze.footerTemplate))},dependencies:function(){return[a.mk,a.O5,a.tP,Ji]},encapsulation:2,changeDetection:0}),gt})(),Ji=(()=>{class gt{constructor(){this.change=new r.vpe,this._count=0,this._page=1,this._size=0}set size(ie){this._size=ie,this.pages=this.calcPages()}get size(){return this._size}set count(ie){this._count=ie,this.pages=this.calcPages()}get count(){return this._count}set page(ie){this._page=ie,this.pages=this.calcPages()}get page(){return this._page}get totalPages(){const ie=this.size<1?1:Math.ceil(this.count/this.size);return Math.max(ie||0,1)}canPrevious(){return this.page>1}canNext(){return this.page<this.totalPages}prevPage(){this.selectPage(this.page-1)}nextPage(){this.selectPage(this.page+1)}selectPage(ie){ie>0&&ie<=this.totalPages&&ie!==this.page&&(this.page=ie,this.change.emit({page:ie}))}calcPages(ie){const Ze=[];let Jt=1,gn=this.totalPages;ie=ie||this.page,5<this.totalPages&&(Jt=ie-Math.floor(2.5),gn=ie+Math.floor(2.5),Jt<1?(Jt=1,gn=Math.min(Jt+5-1,this.totalPages)):gn>this.totalPages&&(Jt=Math.max(this.totalPages-5+1,1),gn=this.totalPages));for(let Xi=Jt;Xi<=gn;Xi++)Ze.push({number:Xi,text:Xi});return Ze}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-pager"]],hostAttrs:[1,"datatable-pager"],inputs:{size:"size",count:"count",page:"page",pagerLeftArrowIcon:"pagerLeftArrowIcon",pagerRightArrowIcon:"pagerRightArrowIcon",pagerPreviousIcon:"pagerPreviousIcon",pagerNextIcon:"pagerNextIcon"},outputs:{change:"change"},decls:14,vars:21,consts:[[1,"pager"],["role","button","aria-label","go to first page","href","javascript:void(0)",3,"click"],["role","button","aria-label","go to previous page","href","javascript:void(0)",3,"click"],["role","button","class","pages",3,"active",4,"ngFor","ngForOf"],["role","button","aria-label","go to next page","href","javascript:void(0)",3,"click"],["role","button","aria-label","go to last 
page","href","javascript:void(0)",3,"click"],["role","button",1,"pages"],["href","javascript:void(0)",3,"click"]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"ul",0)(1,"li")(2,"a",1),r.NdJ("click",function(){return Ze.selectPage(1)}),r._UZ(3,"i"),r.qZA()(),r.TgZ(4,"li")(5,"a",2),r.NdJ("click",function(){return Ze.prevPage()}),r._UZ(6,"i"),r.qZA()(),r.YNc(7,Xt,3,4,"li",3),r.TgZ(8,"li")(9,"a",4),r.NdJ("click",function(){return Ze.nextPage()}),r._UZ(10,"i"),r.qZA()(),r.TgZ(11,"li")(12,"a",5),r.NdJ("click",function(){return Ze.selectPage(Ze.totalPages)}),r._UZ(13,"i"),r.qZA()()()),2&ie&&(r.xp6(1),r.ekj("disabled",!Ze.canPrevious()),r.xp6(2),r.Tol(Ze.pagerPreviousIcon),r.xp6(1),r.ekj("disabled",!Ze.canPrevious()),r.xp6(2),r.Tol(Ze.pagerLeftArrowIcon),r.xp6(1),r.Q6J("ngForOf",Ze.pages),r.xp6(1),r.ekj("disabled",!Ze.canNext()),r.xp6(2),r.Tol(Ze.pagerRightArrowIcon),r.xp6(1),r.ekj("disabled",!Ze.canNext()),r.xp6(2),r.Tol(Ze.pagerNextIcon))},dependencies:[a.sg],encapsulation:2,changeDetection:0}),gt})(),Ha=(()=>{class gt{}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-progress"]],decls:3,vars:0,consts:[["role","progressbar",1,"progress-linear"],[1,"container"],[1,"bar"]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"div",0)(1,"div",1),r._UZ(2,"div",2),r.qZA()())},encapsulation:2,changeDetection:0}),gt})();var Ts=(()=>{return(gt=Ts||(Ts={}))[gt.up=38]="up",gt[gt.down=40]="down",gt[gt.return=13]="return",gt[gt.escape=27]="escape",gt[gt.left=37]="left",gt[gt.right=39]="right",Ts;var gt})();let hs=(()=>{class gt{constructor(ie,Ze,Jt,gn){this.differs=ie,this.scrollbarHelper=Ze,this.cd=Jt,this.treeStatus="collapsed",this.activate=new r.vpe,this.treeAction=new r.vpe,this._groupStyles={left:{},center:{},right:{}},this._element=gn.nativeElement,this._rowDiffer=ie.find({}).create()}set columns(ie){this._columns=ie,this.recalculateColumns(ie),this.buildStylesByGroup()}get columns(){return this._columns}set innerWidth(ie){if(this._columns){const Ze=Ci(this._columns);this._columnGroupWidths=Ai(Ze,this._columns)}this._innerWidth=ie,this.recalculateColumns(),this.buildStylesByGroup()}get innerWidth(){return this._innerWidth}set offsetX(ie){this._offsetX=ie,this.buildStylesByGroup()}get offsetX(){return this._offsetX}get cssClass(){let ie="datatable-body-row";if(this.isSelected&&(ie+=" active"),this.rowIndex%2!=0&&(ie+=" datatable-row-odd"),this.rowIndex%2==0&&(ie+=" datatable-row-even"),this.rowClass){const Ze=this.rowClass(this.row);if("string"==typeof Ze)ie+=` ${Ze}`;else if("object"==typeof Ze){const Jt=Object.keys(Ze);for(const gn of Jt)!0===Ze[gn]&&(ie+=` ${gn}`)}}return ie}get columnsTotalWidths(){return this._columnGroupWidths.total}ngDoCheck(){this._rowDiffer.diff(this.row)&&this.cd.markForCheck()}trackByGroups(ie,Ze){return Ze.type}columnTrackingFn(ie,Ze){return Ze.$$id}buildStylesByGroup(){this._groupStyles.left=this.calcStylesByGroup("left"),this._groupStyles.center=this.calcStylesByGroup("center"),this._groupStyles.right=this.calcStylesByGroup("right"),this.cd.markForCheck()}calcStylesByGroup(ie){const Ze=this._columnGroupWidths,Jt=this.offsetX,gn={width:`${Ze[ie]}px`};if("left"===ie)Bn(gn,Jt,0);else if("right"===ie){const vi=parseInt(this.innerWidth+"",0);Bn(gn,-1*(Ze.total-vi-Jt+this.scrollbarHelper.width),0)}return gn}onActivate(ie,Ze){ie.cellIndex=Ze,ie.rowElement=this._element,this.activate.emit(ie)}onKeyDown(ie){const 
Ze=ie.keyCode;(Ze===Ts.return||Ze===Ts.down||Ze===Ts.up||Ze===Ts.left||Ze===Ts.right)&&ie.target===this._element&&(ie.preventDefault(),ie.stopPropagation(),this.activate.emit({type:"keydown",event:ie,row:this.row,rowElement:this._element}))}onMouseenter(ie){this.activate.emit({type:"mouseenter",event:ie,row:this.row,rowElement:this._element})}recalculateColumns(ie=this.columns){this._columns=ie;const Ze=Ci(this._columns);this._columnsByPin=dr(this._columns),this._columnGroupWidths=Ai(Ze,this._columns)}onTreeAction(){this.treeAction.emit()}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.aQg),r.Y36(so,4),r.Y36(r.sBO),r.Y36(r.SBq))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-body-row"]],hostVars:6,hostBindings:function(ie,Ze){1&ie&&r.NdJ("keydown",function(gn){return Ze.onKeyDown(gn)})("mouseenter",function(gn){return Ze.onMouseenter(gn)}),2&ie&&(r.Tol(Ze.cssClass),r.Udp("width",Ze.columnsTotalWidths,"px")("height",Ze.rowHeight,"px"))},inputs:{treeStatus:"treeStatus",columns:"columns",innerWidth:"innerWidth",offsetX:"offsetX",expanded:"expanded",rowClass:"rowClass",row:"row",group:"group",isSelected:"isSelected",rowIndex:"rowIndex",displayCheck:"displayCheck",rowHeight:"rowHeight"},outputs:{activate:"activate",treeAction:"treeAction"},decls:1,vars:2,consts:[[3,"class","ngStyle",4,"ngFor","ngForOf","ngForTrackBy"],[3,"ngStyle"],["tabindex","-1",3,"row","group","expanded","isSelected","rowIndex","column","rowHeight","displayCheck","treeStatus","activate","treeAction",4,"ngFor","ngForOf","ngForTrackBy"],["tabindex","-1",3,"row","group","expanded","isSelected","rowIndex","column","rowHeight","displayCheck","treeStatus","activate","treeAction"]],template:function(ie,Ze){1&ie&&r.YNc(0,pn,2,6,"div",0),2&ie&&r.Q6J("ngForOf",Ze._columnsByPin)("ngForTrackBy",Ze.trackByGroups)},dependencies:function(){return[a.sg,a.PC,Aa]},encapsulation:2,changeDetection:0}),gt})(),$s=(()=>{class gt{constructor(ie,Ze){this.cd=ie,this.differs=Ze,this.rowContextmenu=new r.vpe(!1),this.groupContext={group:this.row,expanded:this.expanded,rowIndex:this.rowIndex},this.rowContext={row:this.row,expanded:this.expanded,rowIndex:this.rowIndex},this._expanded=!1,this.rowDiffer=Ze.find({}).create()}set rowIndex(ie){this._rowIndex=ie,this.rowContext.rowIndex=ie,this.groupContext.rowIndex=ie,this.cd.markForCheck()}get rowIndex(){return this._rowIndex}set expanded(ie){this._expanded=ie,this.groupContext.expanded=ie,this.rowContext.expanded=ie,this.cd.markForCheck()}get expanded(){return this._expanded}ngDoCheck(){this.rowDiffer.diff(this.row)&&(this.rowContext.row=this.row,this.groupContext.group=this.row,this.cd.markForCheck())}onContextmenu(ie){this.rowContextmenu.emit({event:ie,row:this.row})}getGroupHeaderStyle(){const ie={};return ie.transform="translate3d("+this.offsetX+"px, 0px, 0px)",ie["backface-visibility"]="hidden",ie.width=this.innerWidth,ie}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.sBO),r.Y36(r.aQg))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-row-wrapper"]],hostAttrs:[1,"datatable-row-wrapper"],hostBindings:function(ie,Ze){1&ie&&r.NdJ("contextmenu",function(gn){return 
Ze.onContextmenu(gn)})},inputs:{rowIndex:"rowIndex",expanded:"expanded",innerWidth:"innerWidth",rowDetail:"rowDetail",groupHeader:"groupHeader",offsetX:"offsetX",detailRowHeight:"detailRowHeight",row:"row",groupedRows:"groupedRows"},outputs:{rowContextmenu:"rowContextmenu"},ngContentSelectors:T,decls:3,vars:3,consts:[["class","datatable-group-header",3,"ngStyle",4,"ngIf"],[4,"ngIf"],["class","datatable-row-detail",3,"height",4,"ngIf"],[1,"datatable-group-header",3,"ngStyle"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[1,"datatable-row-detail"]],template:function(ie,Ze){1&ie&&(r.F$t(),r.YNc(0,qt,2,2,"div",0),r.YNc(1,sn,1,0,"ng-content",1),r.YNc(2,Kr,2,3,"div",2)),2&ie&&(r.Q6J("ngIf",Ze.groupHeader&&Ze.groupHeader.template),r.xp6(1),r.Q6J("ngIf",Ze.groupHeader&&Ze.groupHeader.template&&Ze.expanded||!Ze.groupHeader||!Ze.groupHeader.template),r.xp6(1),r.Q6J("ngIf",Ze.rowDetail&&Ze.rowDetail.template&&Ze.expanded))},dependencies:[a.O5,a.tP,a.PC],encapsulation:2,changeDetection:0}),gt})(),Aa=(()=>{class gt{constructor(ie,Ze){this.cd=Ze,this.activate=new r.vpe,this.treeAction=new r.vpe,this.isFocused=!1,this.onCheckboxChangeFn=this.onCheckboxChange.bind(this),this.activateFn=this.activate.emit.bind(this.activate),this.cellContext={onCheckboxChangeFn:this.onCheckboxChangeFn,activateFn:this.activateFn,row:this.row,group:this.group,value:this.value,column:this.column,rowHeight:this.rowHeight,isSelected:this.isSelected,rowIndex:this.rowIndex,treeStatus:this.treeStatus,onTreeAction:this.onTreeAction.bind(this)},this._element=ie.nativeElement}set group(ie){this._group=ie,this.cellContext.group=ie,this.checkValueUpdates(),this.cd.markForCheck()}get group(){return this._group}set rowHeight(ie){this._rowHeight=ie,this.cellContext.rowHeight=ie,this.checkValueUpdates(),this.cd.markForCheck()}get rowHeight(){return this._rowHeight}set isSelected(ie){this._isSelected=ie,this.cellContext.isSelected=ie,this.cd.markForCheck()}get isSelected(){return this._isSelected}set expanded(ie){this._expanded=ie,this.cellContext.expanded=ie,this.cd.markForCheck()}get expanded(){return this._expanded}set rowIndex(ie){this._rowIndex=ie,this.cellContext.rowIndex=ie,this.checkValueUpdates(),this.cd.markForCheck()}get rowIndex(){return this._rowIndex}set column(ie){this._column=ie,this.cellContext.column=ie,this.checkValueUpdates(),this.cd.markForCheck()}get column(){return this._column}set row(ie){this._row=ie,this.cellContext.row=ie,this.checkValueUpdates(),this.cd.markForCheck()}get row(){return this._row}set sorts(ie){this._sorts=ie,this.calcSortDir=this.calcSortDir(ie)}get sorts(){return this._sorts}set treeStatus(ie){this._treeStatus="collapsed"!==ie&&"expanded"!==ie&&"loading"!==ie&&"disabled"!==ie?"collapsed":ie,this.cellContext.treeStatus=this._treeStatus,this.checkValueUpdates(),this.cd.markForCheck()}get treeStatus(){return this._treeStatus}get columnCssClasses(){let ie="datatable-body-cell";if(this.column.cellClass)if("string"==typeof this.column.cellClass)ie+=" "+this.column.cellClass;else if("function"==typeof this.column.cellClass){const Ze=this.column.cellClass({row:this.row,group:this.group,column:this.column,value:this.value,rowHeight:this.rowHeight});if("string"==typeof Ze)ie+=" "+Ze;else if("object"==typeof Ze){const Jt=Object.keys(Ze);for(const gn of Jt)!0===Ze[gn]&&(ie+=` ${gn}`)}}return this.sortDir||(ie+=" sort-active"),this.isFocused&&(ie+=" active"),this.sortDir===Ro.asc&&(ie+=" sort-asc"),this.sortDir===Ro.desc&&(ie+=" sort-desc"),ie}get width(){return this.column.width}get minWidth(){return 
this.column.minWidth}get maxWidth(){return this.column.maxWidth}get height(){const ie=this.rowHeight;return isNaN(ie)?ie:ie+"px"}ngDoCheck(){this.checkValueUpdates()}ngOnDestroy(){this.cellTemplate&&this.cellTemplate.clear()}checkValueUpdates(){let ie="";if(this.row&&this.column){const Ze=this.column.$$valueGetter(this.row,this.column.prop),Jt=this.column.pipe;Jt?ie=Jt.transform(Ze):void 0!==ie&&(ie=Ze)}else ie="";this.value!==ie&&(this.value=ie,this.cellContext.value=ie,this.sanitizedValue=null!=ie?this.stripHtml(ie):ie,this.cd.markForCheck())}onFocus(){this.isFocused=!0}onBlur(){this.isFocused=!1}onClick(ie){this.activate.emit({type:"click",event:ie,row:this.row,group:this.group,rowHeight:this.rowHeight,column:this.column,value:this.value,cellElement:this._element})}onDblClick(ie){this.activate.emit({type:"dblclick",event:ie,row:this.row,group:this.group,rowHeight:this.rowHeight,column:this.column,value:this.value,cellElement:this._element})}onKeyDown(ie){const Ze=ie.keyCode;(Ze===Ts.return||Ze===Ts.down||Ze===Ts.up||Ze===Ts.left||Ze===Ts.right)&&ie.target===this._element&&(ie.preventDefault(),ie.stopPropagation(),this.activate.emit({type:"keydown",event:ie,row:this.row,group:this.group,rowHeight:this.rowHeight,column:this.column,value:this.value,cellElement:this._element}))}onCheckboxChange(ie){this.activate.emit({type:"checkbox",event:ie,row:this.row,group:this.group,rowHeight:this.rowHeight,column:this.column,value:this.value,cellElement:this._element,treeStatus:"collapsed"})}calcSortDir(ie){if(!ie)return;const Ze=ie.find(Jt=>Jt.prop===this.column.prop);return Ze?Ze.dir:void 0}stripHtml(ie){return ie.replace?ie.replace(/<\/?[^>]+(>|$)/g,""):ie}onTreeAction(){this.treeAction.emit(this.row)}calcLeftMargin(ie,Ze){return ie.isTreeColumn?Ze.level*(null!=ie.treeLevelIndent?ie.treeLevelIndent:50):0}}return gt.\u0275fac=function(ie){return new(ie||gt)(r.Y36(r.SBq),r.Y36(r.sBO))},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-body-cell"]],viewQuery:function(ie,Ze){if(1&ie&&r.Gf(Or,7,r.s_b),2&ie){let Jt;r.iGM(Jt=r.CRH())&&(Ze.cellTemplate=Jt.first)}},hostVars:10,hostBindings:function(ie,Ze){1&ie&&r.NdJ("focus",function(){return Ze.onFocus()})("blur",function(){return Ze.onBlur()})("click",function(gn){return Ze.onClick(gn)})("dblclick",function(gn){return Ze.onDblClick(gn)})("keydown",function(gn){return Ze.onKeyDown(gn)}),2&ie&&(r.Tol(Ze.columnCssClasses),r.Udp("width",Ze.width,"px")("min-width",Ze.minWidth,"px")("max-width",Ze.maxWidth,"px")("height",Ze.height))},inputs:{group:"group",rowHeight:"rowHeight",isSelected:"isSelected",expanded:"expanded",rowIndex:"rowIndex",column:"column",row:"row",sorts:"sorts",treeStatus:"treeStatus",displayCheck:"displayCheck"},outputs:{activate:"activate",treeAction:"treeAction"},decls:5,vars:6,consts:[[1,"datatable-body-cell-label"],["class","datatable-checkbox",4,"ngIf"],[4,"ngIf"],[3,"title","innerHTML",4,"ngIf"],[1,"datatable-checkbox"],["type","checkbox",3,"checked","click"],["class","datatable-tree-button",3,"disabled","click",4,"ngIf"],[1,"datatable-tree-button",3,"disabled","click"],["class","icon datatable-icon-collapse",4,"ngIf"],["class","icon datatable-icon-up",4,"ngIf"],["class","icon 
datatable-icon-down",4,"ngIf"],[1,"icon","datatable-icon-collapse"],[1,"icon","datatable-icon-up"],[1,"icon","datatable-icon-down"],[3,"ngTemplateOutlet","ngTemplateOutletContext"],[3,"title","innerHTML"],["cellTemplate",""]],template:function(ie,Ze){1&ie&&(r.TgZ(0,"div",0),r.YNc(1,Lr,2,1,"label",1),r.YNc(2,wn,3,2,"ng-container",2),r.YNc(3,jn,1,2,"span",3),r.YNc(4,Oi,2,2,null,2),r.qZA()),2&ie&&(r.Udp("margin-left",Ze.calcLeftMargin(Ze.column,Ze.row),"px"),r.xp6(1),r.Q6J("ngIf",Ze.column.checkboxable&&(!Ze.displayCheck||Ze.displayCheck(Ze.row,Ze.column,Ze.value))),r.xp6(1),r.Q6J("ngIf",Ze.column.isTreeColumn),r.xp6(1),r.Q6J("ngIf",!Ze.column.cellTemplate),r.xp6(1),r.Q6J("ngIf",Ze.column.cellTemplate))},dependencies:[a.O5,a.tP],encapsulation:2,changeDetection:0}),gt})();function Ja(gt,Tn,ie){const Ze=ie(Tn,gt);return Ze>-1?gt.splice(Ze,1):gt.push(Tn),gt}let Xo=(()=>{class gt{constructor(){this.activate=new r.vpe,this.select=new r.vpe}selectRow(ie,Ze,Jt){if(!this.selectEnabled)return;const gn=this.selectionType===tr.checkbox,Bi=this.selectionType===tr.multiClick;let Xi=[];Xi=this.selectionType===tr.multi||gn||Bi?ie.shiftKey?function fa(gt,Tn,ie,Ze,Jt){const gn=ie<Ze;for(let vi=0;vi<Tn.length;vi++){const ds=gn?{start:ie,end:Ze}:{start:Ze,end:ie+1};(gn&&vi<=Ze&&vi>=ie||!gn&&vi>=Ze&&vi<=ie)&&vi>=ds.start&&vi<=ds.end&&gt.push(Tn[vi])}return gt}([],this.rows,Ze,this.prevIndex,this.getRowSelectedIdx.bind(this)):Ja(ie.ctrlKey||ie.metaKey||Bi||gn?[...this.selected]:[],Jt,this.getRowSelectedIdx.bind(this)):Ja([],Jt,this.getRowSelectedIdx.bind(this)),"function"==typeof this.selectCheck&&(Xi=Xi.filter(this.selectCheck.bind(this))),this.selected.splice(0,this.selected.length),this.selected.push(...Xi),this.prevIndex=Ze,this.select.emit({selected:Xi})}onActivate(ie,Ze){const{type:Jt,event:gn,row:vi}=ie,Bi=this.selectionType===tr.checkbox;!Bi&&("click"===Jt||"dblclick"===Jt)||Bi&&"checkbox"===Jt?this.selectRow(gn,Ze,vi):"keydown"===Jt&&(gn.keyCode===Ts.return?this.selectRow(gn,Ze,vi):this.onKeyboardFocus(ie)),this.activate.emit(ie)}onKeyboardFocus(ie){const{keyCode:Ze}=ie.event;if(Ze===Ts.up||Ze===Ts.down||Ze===Ts.right||Ze===Ts.left){const gn=this.selectionType===tr.cell;ie.cellElement&&gn?gn&&this.focusCell(ie.cellElement,ie.rowElement,Ze,ie.cellIndex):this.focusRow(ie.rowElement,Ze)}}focusRow(ie,Ze){const Jt=this.getPrevNextRow(ie,Ze);Jt&&Jt.focus()}getPrevNextRow(ie,Ze){const Jt=ie.parentElement;if(Jt){let gn;if(Ze===Ts.up?gn=Jt.previousElementSibling:Ze===Ts.down&&(gn=Jt.nextElementSibling),gn&&gn.children.length)return gn.children[0]}}focusCell(ie,Ze,Jt,gn){let vi;if(Jt===Ts.left)vi=ie.previousElementSibling;else if(Jt===Ts.right)vi=ie.nextElementSibling;else if(Jt===Ts.up||Jt===Ts.down){const Bi=this.getPrevNextRow(Ze,Jt);if(Bi){const Xi=Bi.getElementsByClassName("datatable-body-cell");Xi.length&&(vi=Xi[gn])}}vi&&vi.focus()}getRowSelected(ie){return this.getRowSelectedIdx(ie,this.selected)>-1}getRowSelectedIdx(ie,Ze){if(!Ze||!Ze.length)return-1;const Jt=this.rowIdentity(ie);return Ze.findIndex(gn=>this.rowIdentity(gn)===Jt)}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-selection"]],inputs:{rows:"rows",selected:"selected",selectEnabled:"selectEnabled",selectionType:"selectionType",rowIdentity:"rowIdentity",selectCheck:"selectCheck"},outputs:{activate:"activate",select:"select"},ngContentSelectors:T,decls:1,vars:0,template:function(ie,Ze){1&ie&&(r.F$t(),r.Hsn(0))},encapsulation:2,changeDetection:0}),gt})();function No(gt){const Tn=gt.filter(ie=>!!ie);return!Tn.length||Tn.some(ie=>"number"!=typeof ie)?null:Tn.reduce((ie,Ze)=>ie+Ze)}function 
Cs(gt){return null}let ns=(()=>{class gt{constructor(){this.summaryRow={}}ngOnChanges(){!this.columns||!this.rows||(this.updateInternalColumns(),this.updateValues())}updateInternalColumns(){this._internalColumns=this.columns.map(ie=>Object.assign(Object.assign({},ie),{cellTemplate:ie.summaryTemplate}))}updateValues(){this.summaryRow={},this.columns.filter(ie=>!ie.summaryTemplate).forEach(ie=>{const Ze=this.rows.map(gn=>gn[ie.prop]),Jt=this.getSummaryFunction(ie);this.summaryRow[ie.prop]=ie.pipe?ie.pipe.transform(Jt(Ze)):Jt(Ze)})}getSummaryFunction(ie){return void 0===ie.summaryFunc?No:null===ie.summaryFunc?Cs:ie.summaryFunc}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275cmp=r.Xpm({type:gt,selectors:[["datatable-summary-row"]],hostAttrs:[1,"datatable-summary-row"],inputs:{rows:"rows",columns:"columns",rowHeight:"rowHeight",offsetX:"offsetX",innerWidth:"innerWidth"},features:[r.TTD],decls:1,vars:1,consts:[["tabindex","-1",3,"innerWidth","offsetX","columns","rowHeight","row","rowIndex",4,"ngIf"],["tabindex","-1",3,"innerWidth","offsetX","columns","rowHeight","row","rowIndex"]],template:function(ie,Ze){1&ie&&r.YNc(0,Wi,1,6,"datatable-body-row",0),2&ie&&r.Q6J("ngIf",Ze.summaryRow&&Ze._internalColumns)},dependencies:[a.O5,hs],encapsulation:2}),gt})(),Fo=(()=>{class gt{static forRoot(ie){return{ngModule:gt,providers:[{provide:"configuration",useValue:ie}]}}}return gt.\u0275fac=function(ie){return new(ie||gt)},gt.\u0275mod=r.oAB({type:gt}),gt.\u0275inj=r.cJS({providers:[so,kr,Ei],imports:[a.ez]}),gt})();typeof document<"u"&&!document.elementsFromPoint&&(document.elementsFromPoint=function io(gt,Tn){const ie=[],Ze=[];let Jt,gn,vi;for(;(Jt=document.elementFromPoint(gt,Tn))&&-1===ie.indexOf(Jt)&&null!=Jt;)ie.push(Jt),Ze.push({value:Jt.style.getPropertyValue("pointer-events"),priority:Jt.style.getPropertyPriority("pointer-events")}),Jt.style.setProperty("pointer-events","none","important");for(gn=Ze.length;vi=Ze[--gn];)ie[gn].style.setProperty("pointer-events",vi.value?vi.value:"",vi.priority);return ie})},67506:E=>{"use strict";function C(a,c,u){a instanceof RegExp&&(a=s(a,u)),c instanceof RegExp&&(c=s(c,u));var e=r(a,c,u);return e&&{start:e[0],end:e[1],pre:u.slice(0,e[0]),body:u.slice(e[0]+a.length,e[1]),post:u.slice(e[1]+c.length)}}function s(a,c){var u=c.match(a);return u?u[0]:null}function r(a,c,u){var e,f,m,T,M,w=u.indexOf(a),D=u.indexOf(c,w+1),U=w;if(w>=0&&D>0){if(a===c)return[w,D];for(e=[],m=u.length;U>=0&&!M;)U==w?(e.push(U),w=u.indexOf(a,U+1)):1==e.length?M=[e.pop(),D]:((f=e.pop())<m&&(m=f,T=D),D=u.indexOf(c,U+1)),U=w<D&&w>=0?w:D;e.length&&(M=[m,T])}return M}E.exports=C,C.range=r},96434:(E,C)=>{"use strict";C.byteLength=function m(W){var $=f(W),F=$[1];return 3*($[0]+F)/4-F},C.toByteArray=function M(W){var $,se,J=f(W),F=J[0],X=J[1],de=new a(function T(W,$,J){return 3*($+J)/4-J}(0,F,X)),V=0,ce=X>0?F-4:F;for(se=0;se<ce;se+=4)$=r[W.charCodeAt(se)]<<18|r[W.charCodeAt(se+1)]<<12|r[W.charCodeAt(se+2)]<<6|r[W.charCodeAt(se+3)],de[V++]=$>>16&255,de[V++]=$>>8&255,de[V++]=255&$;return 2===X&&($=r[W.charCodeAt(se)]<<2|r[W.charCodeAt(se+1)]>>4,de[V++]=255&$),1===X&&($=r[W.charCodeAt(se)]<<10|r[W.charCodeAt(se+1)]<<4|r[W.charCodeAt(se+2)]>>2,de[V++]=$>>8&255,de[V++]=255&$),de},C.fromByteArray=function U(W){for(var $,J=W.length,F=J%3,X=[],V=0,ce=J-F;V<ce;V+=16383)X.push(D(W,V,V+16383>ce?ce:V+16383));return 1===F?X.push(s[($=W[J-1])>>2]+s[$<<4&63]+"=="):2===F&&X.push(s[($=(W[J-2]<<8)+W[J-1])>>10]+s[$>>4&63]+s[$<<2&63]+"="),X.join("")};for(var s=[],r=[],a=typeof Uint8Array<"u"?Uint8Array:Array,c="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",u=0,e=c.length;u<e;++u)s[u]=c[u],r[c.charCodeAt(u)]=u;r["-".charCodeAt(0)]=62,r["_".charCodeAt(0)]=63;function f(W){var $=W.length;if($%4>0)throw new Error("Invalid string. 
Length must be a multiple of 4");var J=W.indexOf("=");return-1===J&&(J=$),[J,J===$?0:4-J%4]}function w(W){return s[W>>18&63]+s[W>>12&63]+s[W>>6&63]+s[63&W]}function D(W,$,J){for(var X=[],de=$;de{var r=s(2665),a=s(67506);E.exports=function U(V){return V?("{}"===V.substr(0,2)&&(V="\\{\\}"+V.substr(2)),de(function M(V){return V.split("\\\\").join(c).split("\\{").join(u).split("\\}").join(e).split("\\,").join(f).split("\\.").join(m)}(V),!0).map(w)):[]};var c="\0SLASH"+Math.random()+"\0",u="\0OPEN"+Math.random()+"\0",e="\0CLOSE"+Math.random()+"\0",f="\0COMMA"+Math.random()+"\0",m="\0PERIOD"+Math.random()+"\0";function T(V){return parseInt(V,10)==V?parseInt(V,10):V.charCodeAt(0)}function w(V){return V.split(c).join("\\").split(u).join("{").split(e).join("}").split(f).join(",").split(m).join(".")}function D(V){if(!V)return[""];var ce=[],se=a("{","}",V);if(!se)return V.split(",");var Te=se.body,$e=se.post,ge=se.pre.split(",");ge[ge.length-1]+="{"+Te+"}";var Et=D($e);return $e.length&&(ge[ge.length-1]+=Et.shift(),ge.push.apply(ge,Et)),ce.push.apply(ce,ge),ce}function $(V){return"{"+V+"}"}function J(V){return/^-?0\d/.test(V)}function F(V,ce){return V<=ce}function X(V,ce){return V>=ce}function de(V,ce){var se=[],fe=a("{","}",V);if(!fe||/\$$/.test(fe.pre))return[V];var ot,Te=/^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(fe.body),$e=/^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(fe.body),ge=Te||$e,Et=fe.body.indexOf(",")>=0;if(!ge&&!Et)return fe.post.match(/,.*\}/)?de(V=fe.pre+"{"+fe.body+e+fe.post):[V];if(ge)ot=fe.body.split(/\.\./);else if(1===(ot=D(fe.body)).length&&1===(ot=de(ot[0],!1).map($)).length)return(qe=fe.post.length?de(fe.post,!1):[""]).map(function(Lr){return fe.pre+ot[0]+Lr});var He,ct=fe.pre,qe=fe.post.length?de(fe.post,!1):[""];if(ge){var We=T(ot[0]),Le=T(ot[1]),Pt=Math.max(ot[0].length,ot[1].length),it=3==ot.length?Math.abs(T(ot[2])):1,Xt=F;Le<We&&(it*=-1,Xt=X);var cn=ot.some(J);He=[];for(var Rn=We;Xt(Rn,Le);Rn+=it){var At;if($e)"\\"===(At=String.fromCharCode(Rn))&&(At="");else if(At=String(Rn),cn){var qt=Pt-At.length;if(qt>0){var sn=new Array(qt+1).join("0");At=Rn<0?"-"+sn+At.slice(1):sn+At}}He.push(At)}}else He=r(ot,function(Or){return de(Or,!1)});for(var fn=0;fn{"use strict";var r=s(18540),a=s(60044),c=a(r("String.prototype.indexOf"));E.exports=function(e,f){var m=r(e,!!f);return"function"==typeof m&&c(e,".prototype.")>-1?a(m):m}},60044:(E,C,s)=>{"use strict";var r=s(75396),a=s(18540),c=a("%Function.prototype.apply%"),u=a("%Function.prototype.call%"),e=a("%Reflect.apply%",!0)||r.call(u,c),f=a("%Object.getOwnPropertyDescriptor%",!0),m=a("%Object.defineProperty%",!0),T=a("%Math.max%");if(m)try{m({},"a",{value:1})}catch{m=null}E.exports=function(D){var U=e(r,u,arguments);return f&&m&&f(U,"length").configurable&&m(U,"length",{value:1+T(0,D.length-(arguments.length-1))}),U};var M=function(){return e(r,c,arguments)};m?m(E.exports,"apply",{value:M}):E.exports.apply=M},72318:E=>{var C=!!(typeof window<"u"&&window.document&&window.document.createElement);E.exports=C},6823:function(E,C,s){E.exports=function(r){"use strict";r=r&&r.hasOwnProperty("default")?r.default:r;var 
u={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]},e=function a(h,b){return h(b={exports:{}},b.exports),b.exports}(function(h){var b={};for(var N in u)u.hasOwnProperty(N)&&(b[u[N]]=N);var 
k=h.exports={rgb:{channels:3,labels:"rgb"},hsl:{channels:3,labels:"hsl"},hsv:{channels:3,labels:"hsv"},hwb:{channels:3,labels:"hwb"},cmyk:{channels:4,labels:"cmyk"},xyz:{channels:3,labels:"xyz"},lab:{channels:3,labels:"lab"},lch:{channels:3,labels:"lch"},hex:{channels:1,labels:["hex"]},keyword:{channels:1,labels:["keyword"]},ansi16:{channels:1,labels:["ansi16"]},ansi256:{channels:1,labels:["ansi256"]},hcg:{channels:3,labels:["h","c","g"]},apple:{channels:3,labels:["r16","g16","b16"]},gray:{channels:1,labels:["gray"]}};for(var ne in k)if(k.hasOwnProperty(ne)){if(!("channels"in k[ne]))throw new Error("missing channels property: "+ne);if(!("labels"in k[ne]))throw new Error("missing channel labels property: "+ne);if(k[ne].labels.length!==k[ne].channels)throw new Error("channel and label counts mismatch: "+ne);var he=k[ne].channels,Me=k[ne].labels;delete k[ne].channels,delete k[ne].labels,Object.defineProperty(k[ne],"channels",{value:he}),Object.defineProperty(k[ne],"labels",{value:Me})}function Qe(Re,ft){return Math.pow(Re[0]-ft[0],2)+Math.pow(Re[1]-ft[1],2)+Math.pow(Re[2]-ft[2],2)}k.rgb.hsl=function(Re){var Dr,uo,ft=Re[0]/255,wt=Re[1]/255,It=Re[2]/255,Cn=Math.min(ft,wt,It),er=Math.max(ft,wt,It),sr=er-Cn;return er===Cn?Dr=0:ft===er?Dr=(wt-It)/sr:wt===er?Dr=2+(It-ft)/sr:It===er&&(Dr=4+(ft-wt)/sr),(Dr=Math.min(60*Dr,360))<0&&(Dr+=360),uo=(Cn+er)/2,[Dr,100*(er===Cn?0:uo<=.5?sr/(er+Cn):sr/(2-er-Cn)),100*uo]},k.rgb.hsv=function(Re){var ft,wt,It,Cn,er,sr=Re[0]/255,Dr=Re[1]/255,oi=Re[2]/255,uo=Math.max(sr,Dr,oi),As=uo-Math.min(sr,Dr,oi),as=function(ma){return(uo-ma)/6/As+.5};return 0===As?Cn=er=0:(er=As/uo,ft=as(sr),wt=as(Dr),It=as(oi),sr===uo?Cn=It-wt:Dr===uo?Cn=1/3+ft-It:oi===uo&&(Cn=2/3+wt-ft),Cn<0?Cn+=1:Cn>1&&(Cn-=1)),[360*Cn,100*er,100*uo]},k.rgb.hwb=function(Re){var ft=Re[0],wt=Re[1],It=Re[2];return[k.rgb.hsl(Re)[0],1/255*Math.min(ft,Math.min(wt,It))*100,100*(It=1-1/255*Math.max(ft,Math.max(wt,It)))]},k.rgb.cmyk=function(Re){var Dr,ft=Re[0]/255,wt=Re[1]/255,It=Re[2]/255;return[100*((1-ft-(Dr=Math.min(1-ft,1-wt,1-It)))/(1-Dr)||0),100*((1-wt-Dr)/(1-Dr)||0),100*((1-It-Dr)/(1-Dr)||0),100*Dr]},k.rgb.keyword=function(Re){var ft=b[Re];if(ft)return ft;var It,wt=1/0;for(var Cn in u)if(u.hasOwnProperty(Cn)){var sr=Qe(Re,u[Cn]);sr<wt&&(wt=sr,It=Cn)}return It},k.rgb.xyz=function(Re){var ft=Re[0]/255,wt=Re[1]/255,It=Re[2]/255;return[100*(.4124*(ft=ft>.04045?Math.pow((ft+.055)/1.055,2.4):ft/12.92)+.3576*(wt=wt>.04045?Math.pow((wt+.055)/1.055,2.4):wt/12.92)+.1805*(It=It>.04045?Math.pow((It+.055)/1.055,2.4):It/12.92)),100*(.2126*ft+.7152*wt+.0722*It),100*(.0193*ft+.1192*wt+.9505*It)]},k.rgb.lab=function(Re){var ft=k.rgb.xyz(Re),wt=ft[0],It=ft[1],Cn=ft[2];return It/=100,Cn/=108.883,wt=(wt/=95.047)>.008856?Math.pow(wt,1/3):7.787*wt+16/116,[116*(It=It>.008856?Math.pow(It,1/3):7.787*It+16/116)-16,500*(wt-It),200*(It-(Cn=Cn>.008856?Math.pow(Cn,1/3):7.787*Cn+16/116))]},k.hsl.rgb=function(Re){var Cn,er,sr,Dr,oi,ft=Re[0]/360,wt=Re[1]/100,It=Re[2]/100;if(0===wt)return[oi=255*It,oi,oi];Cn=2*It-(er=It<.5?It*(1+wt):It+wt-It*wt),Dr=[0,0,0];for(var uo=0;uo<3;uo++)(sr=ft+1/3*-(uo-1))<0&&sr++,sr>1&&sr--,Dr[uo]=255*(oi=6*sr<1?Cn+6*(er-Cn)*sr:2*sr<1?er:3*sr<2?Cn+(er-Cn)*(2/3-sr)*6:Cn);return Dr},k.hsl.hsv=function(Re){var ft=Re[0],wt=Re[1]/100,It=Re[2]/100,Cn=wt,er=Math.max(It,.01);return wt*=(It*=2)<=1?It:2-It,Cn*=er<=1?er:2-er,[ft,100*(0===It?2*Cn/(er+Cn):2*wt/(It+wt)),(It+wt)/2*100]},k.hsv.rgb=function(Re){var ft=Re[0]/60,wt=Re[1]/100,It=Re[2]/100,Cn=Math.floor(ft)%6,er=ft-Math.floor(ft),sr=255*It*(1-wt),Dr=255*It*(1-wt*er),oi=255*It*(1-wt*(1-er));switch(It*=255,Cn){case 0:return[It,oi,sr];case 1:return[Dr,It,sr];case 2:return[sr,It,oi];case 
3:return[sr,Dr,It];case 4:return[oi,sr,It];case 5:return[It,sr,Dr]}},k.hsv.hsl=function(Re){var er,sr,Dr,ft=Re[0],wt=Re[1]/100,It=Re[2]/100,Cn=Math.max(It,.01);return Dr=(2-wt)*It,sr=wt*Cn,[ft,100*(sr=(sr/=(er=(2-wt)*Cn)<=1?er:2-er)||0),100*(Dr/=2)]},k.hwb.rgb=function(Re){var er,sr,Dr,oi,uo,As,as,ft=Re[0]/360,wt=Re[1]/100,It=Re[2]/100,Cn=wt+It;switch(Cn>1&&(wt/=Cn,It/=Cn),Dr=6*ft-(er=Math.floor(6*ft)),1&er&&(Dr=1-Dr),oi=wt+Dr*((sr=1-It)-wt),er){default:case 6:case 0:uo=sr,As=oi,as=wt;break;case 1:uo=oi,As=sr,as=wt;break;case 2:uo=wt,As=sr,as=oi;break;case 3:uo=wt,As=oi,as=sr;break;case 4:uo=oi,As=wt,as=sr;break;case 5:uo=sr,As=wt,as=oi}return[255*uo,255*As,255*as]},k.cmyk.rgb=function(Re){var wt=Re[1]/100,It=Re[2]/100,Cn=Re[3]/100;return[255*(1-Math.min(1,Re[0]/100*(1-Cn)+Cn)),255*(1-Math.min(1,wt*(1-Cn)+Cn)),255*(1-Math.min(1,It*(1-Cn)+Cn))]},k.xyz.rgb=function(Re){var Cn,er,sr,ft=Re[0]/100,wt=Re[1]/100,It=Re[2]/100;return er=-.9689*ft+1.8758*wt+.0415*It,sr=.0557*ft+-.204*wt+1.057*It,Cn=(Cn=3.2406*ft+-1.5372*wt+-.4986*It)>.0031308?1.055*Math.pow(Cn,1/2.4)-.055:12.92*Cn,er=er>.0031308?1.055*Math.pow(er,1/2.4)-.055:12.92*er,sr=sr>.0031308?1.055*Math.pow(sr,1/2.4)-.055:12.92*sr,[255*(Cn=Math.min(Math.max(0,Cn),1)),255*(er=Math.min(Math.max(0,er),1)),255*(sr=Math.min(Math.max(0,sr),1))]},k.xyz.lab=function(Re){var ft=Re[0],wt=Re[1],It=Re[2];return wt/=100,It/=108.883,ft=(ft/=95.047)>.008856?Math.pow(ft,1/3):7.787*ft+16/116,[116*(wt=wt>.008856?Math.pow(wt,1/3):7.787*wt+16/116)-16,500*(ft-wt),200*(wt-(It=It>.008856?Math.pow(It,1/3):7.787*It+16/116))]},k.lab.xyz=function(Re){var Cn,er,sr;Cn=Re[1]/500+(er=(Re[0]+16)/116),sr=er-Re[2]/200;var Dr=Math.pow(er,3),oi=Math.pow(Cn,3),uo=Math.pow(sr,3);return er=Dr>.008856?Dr:(er-16/116)/7.787,Cn=oi>.008856?oi:(Cn-16/116)/7.787,sr=uo>.008856?uo:(sr-16/116)/7.787,[Cn*=95.047,er*=100,sr*=108.883]},k.lab.lch=function(Re){var er,ft=Re[0],wt=Re[1],It=Re[2];return(er=360*Math.atan2(It,wt)/2/Math.PI)<0&&(er+=360),[ft,Math.sqrt(wt*wt+It*It),er]},k.lch.lab=function(Re){var sr,wt=Re[1];return sr=Re[2]/360*2*Math.PI,[Re[0],wt*Math.cos(sr),wt*Math.sin(sr)]},k.rgb.ansi16=function(Re){var ft=Re[0],wt=Re[1],It=Re[2],Cn=1 in arguments?arguments[1]:k.rgb.hsv(Re)[2];if(0===(Cn=Math.round(Cn/50)))return 30;var er=30+(Math.round(It/255)<<2|Math.round(wt/255)<<1|Math.round(ft/255));return 2===Cn&&(er+=60),er},k.hsv.ansi16=function(Re){return k.rgb.ansi16(k.hsv.rgb(Re),Re[2])},k.rgb.ansi256=function(Re){var ft=Re[0],wt=Re[1],It=Re[2];return ft===wt&&wt===It?ft<8?16:ft>248?231:Math.round((ft-8)/247*24)+232:16+36*Math.round(ft/255*5)+6*Math.round(wt/255*5)+Math.round(It/255*5)},k.ansi16.rgb=function(Re){var ft=Re%10;if(0===ft||7===ft)return Re>50&&(ft+=3.5),[ft=ft/10.5*255,ft,ft];var wt=.5*(1+~~(Re>50));return[(1&ft)*wt*255,(ft>>1&1)*wt*255,(ft>>2&1)*wt*255]},k.ansi256.rgb=function(Re){if(Re>=232){var ft=10*(Re-232)+8;return[ft,ft,ft]}var wt;return Re-=16,[Math.floor(Re/36)/5*255,Math.floor((wt=Re%36)/6)/5*255,wt%6/5*255]},k.rgb.hex=function(Re){var wt=(((255&Math.round(Re[0]))<<16)+((255&Math.round(Re[1]))<<8)+(255&Math.round(Re[2]))).toString(16).toUpperCase();return"000000".substring(wt.length)+wt},k.hex.rgb=function(Re){var ft=Re.toString(16).match(/[a-f0-9]{6}|[a-f0-9]{3}/i);if(!ft)return[0,0,0];var wt=ft[0];3===ft[0].length&&(wt=wt.split("").map(function(Dr){return Dr+Dr}).join(""));var It=parseInt(wt,16);return[It>>16&255,It>>8&255,255&It]},k.rgb.hcg=function(Re){var 
oi,ft=Re[0]/255,wt=Re[1]/255,It=Re[2]/255,Cn=Math.max(Math.max(ft,wt),It),er=Math.min(Math.min(ft,wt),It),sr=Cn-er;return oi=sr<=0?0:Cn===ft?(wt-It)/sr%6:Cn===wt?2+(It-ft)/sr:4+(ft-wt)/sr+4,oi/=6,[360*(oi%=1),100*sr,100*(sr<1?er/(1-sr):0)]},k.hsl.hcg=function(Re){var It,ft=Re[1]/100,wt=Re[2]/100,Cn=0;return(It=wt<.5?2*ft*wt:2*ft*(1-wt))<1&&(Cn=(wt-.5*It)/(1-It)),[Re[0],100*It,100*Cn]},k.hsv.hcg=function(Re){var wt=Re[2]/100,It=Re[1]/100*wt,Cn=0;return It<1&&(Cn=(wt-It)/(1-It)),[Re[0],100*It,100*Cn]},k.hcg.rgb=function(Re){var wt=Re[1]/100,It=Re[2]/100;if(0===wt)return[255*It,255*It,255*It];var oi,Cn=[0,0,0],er=Re[0]/360%1*6,sr=er%1,Dr=1-sr;switch(Math.floor(er)){case 0:Cn[0]=1,Cn[1]=sr,Cn[2]=0;break;case 1:Cn[0]=Dr,Cn[1]=1,Cn[2]=0;break;case 2:Cn[0]=0,Cn[1]=1,Cn[2]=sr;break;case 3:Cn[0]=0,Cn[1]=Dr,Cn[2]=1;break;case 4:Cn[0]=sr,Cn[1]=0,Cn[2]=1;break;default:Cn[0]=1,Cn[1]=0,Cn[2]=Dr}return[255*(wt*Cn[0]+(oi=(1-wt)*It)),255*(wt*Cn[1]+oi),255*(wt*Cn[2]+oi)]},k.hcg.hsv=function(Re){var ft=Re[1]/100,It=ft+Re[2]/100*(1-ft),Cn=0;return It>0&&(Cn=ft/It),[Re[0],100*Cn,100*It]},k.hcg.hsl=function(Re){var ft=Re[1]/100,It=Re[2]/100*(1-ft)+.5*ft,Cn=0;return It>0&&It<.5?Cn=ft/(2*It):It>=.5&&It<1&&(Cn=ft/(2*(1-It))),[Re[0],100*Cn,100*It]},k.hcg.hwb=function(Re){var ft=Re[1]/100,It=ft+Re[2]/100*(1-ft);return[Re[0],100*(It-ft),100*(1-It)]},k.hwb.hcg=function(Re){var It=1-Re[2]/100,Cn=It-Re[1]/100,er=0;return Cn<1&&(er=(It-Cn)/(1-Cn)),[Re[0],100*Cn,100*er]},k.apple.rgb=function(Re){return[Re[0]/65535*255,Re[1]/65535*255,Re[2]/65535*255]},k.rgb.apple=function(Re){return[Re[0]/255*65535,Re[1]/255*65535,Re[2]/255*65535]},k.gray.rgb=function(Re){return[Re[0]/100*255,Re[0]/100*255,Re[0]/100*255]},k.gray.hsl=k.gray.hsv=function(Re){return[0,0,Re[0]]},k.gray.hwb=function(Re){return[0,100,Re[0]]},k.gray.cmyk=function(Re){return[0,0,0,Re[0]]},k.gray.lab=function(Re){return[Re[0],0,0]},k.gray.hex=function(Re){var ft=255&Math.round(Re[0]/100*255),It=((ft<<16)+(ft<<8)+ft).toString(16).toUpperCase();return"000000".substring(It.length)+It},k.rgb.gray=function(Re){return[(Re[0]+Re[1]+Re[2])/3/255*100]}});function fe(h){var b=function se(){for(var h={},b=Object.keys(e),N=b.length,k=0;k1&&(N=Array.prototype.slice.call(arguments));var k=h(N);if("object"==typeof k)for(var ne=k.length,he=0;he1&&(N=Array.prototype.slice.call(arguments)),h(N))};return"conversion"in h&&(b.conversion=h.conversion),b}(ne)})});var 
He=Et,We={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]},Le={getRgba:Pt,getHsla:it,getRgb:function cn(h){var b=Pt(h);return b&&b.slice(0,3)},getHsl:function pn(h){var b=it(h);return 
b&&b.slice(0,3)},getHwb:Xt,getAlpha:function Rn(h){var b=Pt(h);return b||(b=it(h))||(b=Xt(h))?b[3]:void 0},hexString:function At(h,N){return N=void 0!==N&&3===h.length?N:h[3],"#"+jr(h[0])+jr(h[1])+jr(h[2])+(N>=0&&N<1?jr(Math.round(255*N)):"")},rgbString:function qt(h,b){return b<1||h[3]&&h[3]<1?sn(h,b):"rgb("+h[0]+", "+h[1]+", "+h[2]+")"},rgbaString:sn,percentString:function fn(h,b){return b<1||h[3]&&h[3]<1?xn(h,b):"rgb("+Math.round(h[0]/255*100)+"%, "+Math.round(h[1]/255*100)+"%, "+Math.round(h[2]/255*100)+"%)"},percentaString:xn,hslString:function Kr(h,b){return b<1||h[3]&&h[3]<1?Or(h,b):"hsl("+h[0]+", "+h[1]+"%, "+h[2]+"%)"},hslaString:Or,hwbString:function Lr(h,b){return void 0===b&&(b=void 0!==h[3]?h[3]:1),"hwb("+h[0]+", "+h[1]+"%, "+h[2]+"%"+(void 0!==b&&1!==b?", "+b:"")+")"},keyword:function ir(h){return br[h.slice(0,3)]}};function Pt(h){if(h){var Me=[0,0,0],Qe=1,Re=h.match(/^#([a-fA-F0-9]{3,4})$/i),ft="";if(Re){ft=(Re=Re[1])[3];for(var wt=0;wtN?(b+.05)/(N+.05):(N+.05)/(b+.05)},level:function(h){var b=this.contrast(h);return b>=7.1?"AAA":b>=4.5?"AA":""},dark:function(){var h=this.values.rgb;return(299*h[0]+587*h[1]+114*h[2])/1e3<128},light:function(){return!this.dark()},negate:function(){for(var h=[],b=0;b<3;b++)h[b]=255-this.values.rgb[b];return this.setValues("rgb",h),this},lighten:function(h){var b=this.values.hsl;return b[2]+=b[2]*h,this.setValues("hsl",b),this},darken:function(h){var b=this.values.hsl;return b[2]-=b[2]*h,this.setValues("hsl",b),this},saturate:function(h){var b=this.values.hsl;return b[1]+=b[1]*h,this.setValues("hsl",b),this},desaturate:function(h){var b=this.values.hsl;return b[1]-=b[1]*h,this.setValues("hsl",b),this},whiten:function(h){var b=this.values.hwb;return b[1]+=b[1]*h,this.setValues("hwb",b),this},blacken:function(h){var b=this.values.hwb;return b[2]+=b[2]*h,this.setValues("hwb",b),this},greyscale:function(){var h=this.values.rgb,b=.3*h[0]+.59*h[1]+.11*h[2];return this.setValues("rgb",[b,b,b]),this},clearer:function(h){var b=this.values.alpha;return this.setValues("alpha",b-b*h),this},opaquer:function(h){var b=this.values.alpha;return this.setValues("alpha",b+b*h),this},rotate:function(h){var b=this.values.hsl,N=(b[0]+h)%360;return b[0]=N<0?360+N:N,this.setValues("hsl",b),this},mix:function(h,b){var N=this,k=h,ne=void 0===b?.5:b,he=2*ne-1,Me=N.alpha()-k.alpha(),Qe=((he*Me==-1?he:(he+Me)/(1+he*Me))+1)/2,Re=1-Qe;return this.rgb(Qe*N.red()+Re*k.red(),Qe*N.green()+Re*k.green(),Qe*N.blue()+Re*k.blue()).alpha(N.alpha()*ne+k.alpha()*(1-ne))},toJSON:function(){return this.rgb()},clone:function(){var k,ne,h=new Wt,b=this.values,N=h.values;for(var he in b)b.hasOwnProperty(he)&&("[object Array]"===(ne={}.toString.call(k=b[he]))?N[he]=k.slice(0):"[object Number]"===ne?N[he]=k:console.error("unexpected color value:",k));return h}},Wt.prototype.spaces={rgb:["red","green","blue"],hsl:["hue","saturation","lightness"],hsv:["hue","saturation","value"],hwb:["hue","whiteness","blackness"],cmyk:["cyan","magenta","yellow","black"]},Wt.prototype.maxes={rgb:[255,255,255],hsl:[360,100,100],hsv:[360,100,100],hwb:[360,100,100],cmyk:[100,100,100,100]},Wt.prototype.getValues=function(h){for(var b=this.values,N={},k=0;k"u"},isArray:function(h){if(Array.isArray&&Array.isArray(h))return!0;var b=Object.prototype.toString.call(h);return"[object"===b.substr(0,7)&&"Array]"===b.substr(-6)},isObject:function(h){return null!==h&&"[object Object]"===Object.prototype.toString.call(h)},isFinite:function(h){return("number"==typeof h||h instanceof 
Number)&&isFinite(h)},valueOrDefault:function(h,b){return typeof h>"u"?b:h},valueAtIndexOrDefault:function(h,b,N){return jn.valueOrDefault(jn.isArray(h)?h[b]:h,N)},callback:function(h,b,N){if(h&&"function"==typeof h.call)return h.apply(N,b)},each:function(h,b,N,k){var ne,he,Me;if(jn.isArray(h))if(he=h.length,k)for(ne=he-1;ne>=0;ne--)b.call(N,h[ne],ne);else for(ne=0;ne=1?h:-(Math.sqrt(1-h*h)-1)},easeOutCirc:function(h){return Math.sqrt(1-(h-=1)*h)},easeInOutCirc:function(h){return(h/=.5)<1?-.5*(Math.sqrt(1-h*h)-1):.5*(Math.sqrt(1-(h-=2)*h)+1)},easeInElastic:function(h){var b=1.70158,N=0,k=1;return 0===h?0:1===h?1:(N||(N=.3),k<1?(k=1,b=N/4):b=N/(2*Math.PI)*Math.asin(1/k),-k*Math.pow(2,10*(h-=1))*Math.sin((h-b)*(2*Math.PI)/N))},easeOutElastic:function(h){var b=1.70158,N=0,k=1;return 0===h?0:1===h?1:(N||(N=.3),k<1?(k=1,b=N/4):b=N/(2*Math.PI)*Math.asin(1/k),k*Math.pow(2,-10*h)*Math.sin((h-b)*(2*Math.PI)/N)+1)},easeInOutElastic:function(h){var b=1.70158,N=0,k=1;return 0===h?0:2==(h/=.5)?1:(N||(N=.45),k<1?(k=1,b=N/4):b=N/(2*Math.PI)*Math.asin(1/k),h<1?k*Math.pow(2,10*(h-=1))*Math.sin((h-b)*(2*Math.PI)/N)*-.5:k*Math.pow(2,-10*(h-=1))*Math.sin((h-b)*(2*Math.PI)/N)*.5+1)},easeInBack:function(h){var b=1.70158;return h*h*((b+1)*h-b)},easeOutBack:function(h){var b=1.70158;return(h-=1)*h*((b+1)*h+b)+1},easeInOutBack:function(h){var b=1.70158;return(h/=.5)<1?h*h*((1+(b*=1.525))*h-b)*.5:.5*((h-=2)*h*((1+(b*=1.525))*h+b)+2)},easeInBounce:function(h){return 1-Oi.easeOutBounce(1-h)},easeOutBounce:function(h){return h<1/2.75?7.5625*h*h:h<2/2.75?7.5625*(h-=1.5/2.75)*h+.75:h<2.5/2.75?7.5625*(h-=2.25/2.75)*h+.9375:7.5625*(h-=2.625/2.75)*h+.984375},easeInOutBounce:function(h){return h<.5?.5*Oi.easeInBounce(2*h):.5*Oi.easeOutBounce(2*h-1)+.5}},Wi={effects:Oi};hr.easingEffects=Oi;var so=Math.PI,kr=so/180,Ei=2*so,ii=so/2,mr=so/4,pr=2*so/3,Eo={clear:function(h){h.ctx.clearRect(0,0,h.width,h.height)},roundedRect:function(h,b,N,k,ne,he){if(he){var Me=Math.min(he,ne/2,k/2),Qe=b+Me,Re=N+Me,ft=b+k-Me,wt=N+ne-Me;h.moveTo(b,Re),Qeb.left-N&&h.xb.top-N&&h.y0&&h.requestAnimationFrame()},advance:function(){for(var b,N,k,ne,h=this.animations,he=0;he=k?(Pe.callback(b.onAnimationComplete,[b],N),N.animating=!1,h.splice(he,1)):++he}},Ot=Pe.options.resolve,mn=["push","pop","shift","splice","unshift"];function Ti(h,b){var N=h._chartjs;if(N){var k=N.listeners,ne=k.indexOf(b);-1!==ne&&k.splice(ne,1),!(k.length>0)&&(mn.forEach(function(he){delete h[he]}),delete h._chartjs)}}var Ci=function(h,b){this.initialize(h,b)};Pe.extend(Ci.prototype,{datasetElementType:null,dataElementType:null,_datasetElementOptions:["backgroundColor","borderCapStyle","borderColor","borderDash","borderDashOffset","borderJoinStyle","borderWidth"],_dataElementOptions:["backgroundColor","borderColor","borderWidth","pointStyle"],initialize:function(h,b){var N=this;N.chart=h,N.index=b,N.linkScales(),N.addElements(),N._type=N.getMeta().type},updateIndex:function(h){this.index=h},linkScales:function(){var h=this,b=h.getMeta(),N=h.chart,k=N.scales,ne=h.getDataset(),he=N.options.scales;(null===b.xAxisID||!(b.xAxisID in k)||ne.xAxisID)&&(b.xAxisID=ne.xAxisID||he.xAxes[0].id),(null===b.yAxisID||!(b.yAxisID in k)||ne.yAxisID)&&(b.yAxisID=ne.yAxisID||he.yAxes[0].id)},getDataset:function(){return this.chart.data.datasets[this.index]},getMeta:function(){return this.chart.getDatasetMeta(this.index)},getScaleForId:function(h){return this.chart.scales[h]},_getValueScaleId:function(){return this.getMeta().yAxisID},_getIndexScaleId:function(){return 
this.getMeta().xAxisID},_getValueScale:function(){return this.getScaleForId(this._getValueScaleId())},_getIndexScale:function(){return this.getScaleForId(this._getIndexScaleId())},reset:function(){this._update(!0)},destroy:function(){this._data&&Ti(this._data,this)},createMetaDataset:function(){var h=this,b=h.datasetElementType;return b&&new b({_chart:h.chart,_datasetIndex:h.index})},createMetaData:function(h){var b=this,N=b.dataElementType;return N&&new N({_chart:b.chart,_datasetIndex:b.index,_index:h})},addElements:function(){var ne,he,h=this,b=h.getMeta(),N=h.getDataset().data||[],k=b.data;for(ne=0,he=N.length;nek&&h.insertElements(k,ne-k)},insertElements:function(h,b){for(var N=0;Nne?h.arc(Me,Qe,b.innerRadius-ne,k+(he=ne/b.innerRadius),N-he,!0):h.arc(Me,Qe,ne,k+Math.PI/2,N-Math.PI/2),h.closePath(),h.clip()}function Ni(h,b,N){var k="inner"===b.borderAlign;k?(h.lineWidth=2*b.borderWidth,h.lineJoin="round"):(h.lineWidth=b.borderWidth,h.lineJoin="bevel"),N.fullCircles&&function dr(h,b,N,k){var he,ne=N.endAngle;for(k&&(N.endAngle=N.startAngle+Ko,_s(h,N),N.endAngle=ne,N.endAngle===N.startAngle&&N.fullCircles&&(N.endAngle+=Ko,N.fullCircles--)),h.beginPath(),h.arc(N.x,N.y,N.innerRadius,N.startAngle+Ko,N.startAngle,!0),he=0;heQe;)ne-=Ko;for(;ne=Me&&ne<=Qe&&he>=N.innerRadius&&he<=N.outerRadius}return!1},getCenterPoint:function(){var h=this._view,b=(h.startAngle+h.endAngle)/2,N=(h.innerRadius+h.outerRadius)/2;return{x:h.x+Math.cos(b)*N,y:h.y+Math.sin(b)*N}},getArea:function(){var h=this._view;return Math.PI*((h.endAngle-h.startAngle)/(2*Math.PI))*(Math.pow(h.outerRadius,2)-Math.pow(h.innerRadius,2))},tooltipPosition:function(){var h=this._view,b=h.startAngle+(h.endAngle-h.startAngle)/2,N=(h.outerRadius-h.innerRadius)/2+h.innerRadius;return{x:h.x+Math.cos(b)*N,y:h.y+Math.sin(b)*N}},draw:function(){var ne,h=this._chart.ctx,b=this._view,N="inner"===b.borderAlign?.33:0,k={x:b.x,y:b.y,innerRadius:b.innerRadius,outerRadius:Math.max(b.outerRadius-N,0),pixelMargin:N,startAngle:b.startAngle,endAngle:b.endAngle,fullCircles:Math.floor(b.circumference/Ko)};if(h.save(),h.fillStyle=b.backgroundColor,h.strokeStyle=b.borderColor,k.fullCircles){for(k.endAngle=k.startAngle+Ko,h.beginPath(),h.arc(k.x,k.y,k.outerRadius,k.startAngle,k.endAngle),h.arc(k.x,k.y,k.innerRadius,k.endAngle,k.startAngle,!0),h.closePath(),ne=0;neh.x&&(b=ci(b,"left","right")):h.baseN?N:he,r:ne.right||Me<0?0:Me>b?b:Me,b:ne.bottom||Qe<0?0:Qe>N?N:Qe,l:ne.left||Re<0?0:Re>b?b:Re}}function ts(h,b,N){var k=null===b,ne=null===N,he=!(!h||k&&ne)&&Bn(h);return he&&(k||b>=he.left&&b<=he.right)&&(ne||N>=he.top&&N<=he.bottom)}qr._set("global",{elements:{rectangle:{backgroundColor:Vt,borderColor:Vt,borderSkipped:"bottom",borderWidth:0}}});var jo=nr.extend({_type:"rectangle",draw:function(){var h=this._chart.ctx,b=this._view,N=function es(h){var b=Bn(h),N=b.right-b.left,k=b.bottom-b.top,ne=go(h,N/2,k/2);return{outer:{x:b.left,y:b.top,w:N,h:k},inner:{x:b.left+ne.l,y:b.top+ne.t,w:N-ne.l-ne.r,h:k-ne.t-ne.b}}}(b),k=N.outer,ne=N.inner;h.fillStyle=b.backgroundColor,h.fillRect(k.x,k.y,k.w,k.h),(k.w!==ne.w||k.h!==ne.h)&&(h.save(),h.beginPath(),h.rect(k.x,k.y,k.w,k.h),h.clip(),h.fillStyle=b.borderColor,h.rect(ne.x,ne.y,ne.w,ne.h),h.fill("evenodd"),h.restore())},height:function(){var h=this._view;return h.base-h.y},inRange:function(h,b){return ts(this._view,h,b)},inLabelRange:function(h,b){var N=this._view;return bn(N)?ts(N,h,null):ts(N,null,b)},inXRange:function(h){return ts(this._view,h,null)},inYRange:function(h){return 
ts(this._view,null,h)},getCenterPoint:function(){var b,N,h=this._view;return bn(h)?(b=h.x,N=(h.y+h.base)/2):(b=(h.x+h.base)/2,N=h.y),{x:b,y:N}},getArea:function(){var h=this._view;return bn(h)?h.width*Math.abs(h.y-h.base):h.height*Math.abs(h.x-h.base)},tooltipPosition:function(){var h=this._view;return{x:h.x,y:h.y}}}),ss={},Is=ji,la=ro,Ro=jo;ss.Arc=ti,ss.Line=Is,ss.Point=la,ss.Rectangle=Ro;var jl=Pe._deprecated,gl=Pe.valueOrDefault;function da(h,b,N){var Qe,Re,k=N.barThickness,ne=b.stackCount,he=b.pixels[h],Me=Pe.isNullOrUndef(k)?function qa(h,b){var k,ne,he,Me,N=h._length;for(he=1,Me=b.length;he0?Math.min(N,Math.abs(ne-k)):N,k=ne;return N}(b.scale,b.pixels):-1;return Pe.isNullOrUndef(k)?(Qe=Me*N.categoryPercentage,Re=N.barPercentage):(Qe=k*ne,Re=1),{chunk:Qe/ne,ratio:Re,start:he-Qe/2}}qr._set("bar",{hover:{mode:"label"},scales:{xAxes:[{type:"category",offset:!0,gridLines:{offsetGridLines:!0}}],yAxes:[{type:"linear"}]}}),qr._set("global",{datasets:{bar:{categoryPercentage:.8,barPercentage:.9}}});var Rl=Ai.extend({dataElementType:ss.Rectangle,_dataElementOptions:["backgroundColor","borderColor","borderSkipped","borderWidth","barPercentage","barThickness","categoryPercentage","maxBarThickness","minBarLength"],initialize:function(){var b,N,h=this;Ai.prototype.initialize.apply(h,arguments),(b=h.getMeta()).stack=h.getDataset().stack,b.bar=!0,N=h._getIndexScale().options,jl("bar chart",N.barPercentage,"scales.[x/y]Axes.barPercentage","dataset.barPercentage"),jl("bar chart",N.barThickness,"scales.[x/y]Axes.barThickness","dataset.barThickness"),jl("bar chart",N.categoryPercentage,"scales.[x/y]Axes.categoryPercentage","dataset.categoryPercentage"),jl("bar chart",h._getValueScale().options.minBarLength,"scales.[x/y]Axes.minBarLength","dataset.minBarLength"),jl("bar chart",N.maxBarThickness,"scales.[x/y]Axes.maxBarThickness","dataset.maxBarThickness")},update:function(h){var k,ne,b=this,N=b.getMeta().data;for(b._ruler=b.getRuler(),k=0,ne=N.length;k=0&&ft.min>=0?ft.min:ft.max,sr=void 0===ft.start?ft.end:ft.max>=0&&ft.min>=0?ft.max-ft.min:ft.min-ft.max,Dr=Re.length;if(It||void 0===It&&void 0!==Cn)for(oi=0;oi=0&&Pl.max>=0?Pl.max:Pl.min,(ft.min<0&&As<0||ft.max>=0&&As>0)&&(er+=As));return as=he.getPixelForValue(er),Na=(ma=he.getPixelForValue(er+sr))-as,void 0!==wt&&Math.abs(Na)=0&&!Me||sr<0&&Me?as-wt:as+wt),{size:Na,base:as,head:ma,center:ma+Na/2}},calculateBarIndexPixels:function(h,b,N,k){var he="flex"===k.barThickness?function $a(h,b,N){var Re,k=b.pixels,ne=k[h],he=h>0?k[h-1]:null,Me=h=$s?-Aa:As<-$s?Aa:0)+Cn,ma=Math.cos(As),Na=Math.sin(As),Pl=Math.cos(as),il=Math.sin(as),dl=As<=0&&as>=0||as>=Aa,Nl=As<=Ja&&as>=Ja||as>=Aa+Ja,ac=As<=-Ja&&as>=-Ja||as>=$s+Ja,wa=As===-$s||as>=$s?-1:Math.min(ma,ma*It,Pl,Pl*It),nc=ac?-1:Math.min(Na,Na*It,il,il*It),yc=dl?1:Math.max(ma,ma*It,Pl,Pl*It),Gc=Nl?1:Math.max(Na,Na*It,il,il*It);he=(yc-wa)/2,Me=(Gc-nc)/2,Qe=-(yc+wa)/2,Re=-(Gc+nc)/2}for(oi=0,uo=wt.length;oi0&&!isNaN(h)?Aa*(Math.abs(h)/b):0},getMaxBorderWidth:function(h){var ne,he,Me,Qe,Re,ft,wt,It,N=0,k=this.chart;if(!h)for(ne=0,he=k.data.datasets.length;ne(N=(wt=ft.borderWidth)>N?wt:N)?It:N);return N},setHoverStyle:function(h){var b=h._model,N=h._options,k=Pe.getHoverColor;h.$previousStyle={backgroundColor:b.backgroundColor,borderColor:b.borderColor,borderWidth:b.borderWidth},b.backgroundColor=hs(N.hoverBackgroundColor,k(N.backgroundColor)),b.borderColor=hs(N.hoverBorderColor,k(N.borderColor)),b.borderWidth=hs(N.hoverBorderWidth,N.borderWidth)},_getRingWeightOffset:function(h){for(var 
b=0,N=0;N0&&ns(he[Me-1]._model,ne)&&(Re.controlPointPreviousX=wt(Re.controlPointPreviousX,ne.left,ne.right),Re.controlPointPreviousY=wt(Re.controlPointPreviousY,ne.top,ne.bottom)),Me0&&(he=h.getDatasetMeta(he[0]._datasetIndex).data),he},"x-axis":function(h,b){return Ll(h,b,{intersect:!1})},point:function(h,b){return ds(h,Xi(b,h))},nearest:function(h,b,N){var k=Xi(b,h);N.axis=N.axis||"xy";var ne=Js(N.axis);return qs(h,k,N.intersect,ne)},x:function(h,b,N){var k=Xi(b,h),ne=[],he=!1;return ws(h,function(Me){Me.inXRange(k.x)&&ne.push(Me),Me.inRange(k.x,k.y)&&(he=!0)}),N.intersect&&!he&&(ne=[]),ne},y:function(h,b,N){var k=Xi(b,h),ne=[],he=!1;return ws(h,function(Me){Me.inYRange(k.y)&&ne.push(Me),Me.inRange(k.x,k.y)&&(he=!0)}),N.intersect&&!he&&(ne=[]),ne}}},Yu=Pe.extend;function Nc(h,b){return Pe.where(h,function(N){return N.pos===b})}function qu(h,b){return h.sort(function(N,k){var ne=b?k:N,he=b?N:k;return ne.weight===he.weight?ne.index-he.index:ne.weight-he.weight})}function au(h,b,N,k){return Math.max(h[N],b[N])+Math.max(h[k],b[k])}function Da(h,b,N){var he,Me,k=N.box,ne=h.maxPadding;if(N.size&&(h[N.pos]-=N.size),N.size=N.horizontal?k.height:k.width,h[N.pos]+=N.size,k.getPadding){var Qe=k.getPadding();ne.top=Math.max(ne.top,Qe.top),ne.left=Math.max(ne.left,Qe.left),ne.bottom=Math.max(ne.bottom,Qe.bottom),ne.right=Math.max(ne.right,Qe.right)}if(he=b.outerWidth-au(ne,h,"left","right"),Me=b.outerHeight-au(ne,h,"top","bottom"),he!==h.w||Me!==h.h){h.w=he,h.h=Me;var Re=N.horizontal?[he,h.w]:[Me,h.h];return!(Re[0]===Re[1]||isNaN(Re[0])&&isNaN(Re[1]))}}function ju(h,b){var N=b.maxPadding;return function k(ne){var he={left:0,top:0,right:0,bottom:0};return ne.forEach(function(Me){he[Me]=Math.max(b[Me],N[Me])}),he}(h?["left","right"]:["top","bottom"])}function el(h,b,N){var ne,he,Me,Qe,Re,ft,k=[];for(ne=0,he=h.length;ne div {\r\n\tposition: absolute;\r\n\twidth: 1000000px;\r\n\theight: 1000000px;\r\n\tleft: 0;\r\n\ttop: 0;\r\n}\r\n\r\n.chartjs-size-monitor-shrink > div {\r\n\tposition: absolute;\r\n\twidth: 200%;\r\n\theight: 200%;\r\n\tleft: 0;\r\n\ttop: 0;\r\n}\r\n"})),ua="$chartjs",El="chartjs-",uu=El+"size-monitor",Eu=El+"render-monitor",$u=El+"render-animation",Ba=["animationstart","webkitAnimationStart"],Tl={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"};function tl(h,b){var N=Pe.getStyle(h,b),k=N&&N.match(/^(\d+)(\.\d+)?px$/);return k?Number(k[1]):void 0}var cu=!!function(){var h=!1;try{var b=Object.defineProperty({},"passive",{get:function(){h=!0}});window.addEventListener("e",null,b)}catch{}return h}()&&{passive:!0};function Sa(h,b,N){h.addEventListener(b,N,cu)}function Ru(h,b,N){h.removeEventListener(b,N,cu)}function xu(h,b,N,k,ne){return{type:h,chart:b,native:ne||null,x:void 0!==N?N:null,y:void 0!==k?k:null}}function Su(h){var b=document.createElement("div");return b.className=h||"",b}function Dc(h,b,N){var k=h[ua]||(h[ua]={}),ne=k.resizer=function gc(h){var b=1e6,N=Su(uu),k=Su(uu+"-expand"),ne=Su(uu+"-shrink");k.appendChild(Su()),ne.appendChild(Su()),N.appendChild(k),N.appendChild(ne),N._reset=function(){k.scrollLeft=b,k.scrollTop=b,ne.scrollLeft=b,ne.scrollTop=b};var he=function(){N._reset(),h()};return Sa(k,"scroll",he.bind(k,"expand")),Sa(ne,"scroll",he.bind(ne,"shrink")),N}(function nl(h,b){var N=!1,k=[];return 
function(){k=Array.prototype.slice.call(arguments),b=b||this,N||(N=!0,Pe.requestAnimFrame.call(window,function(){N=!1,h.apply(b,k)}))}}(function(){if(k.resizer){var he=N.options.maintainAspectRatio&&h.parentNode,Me=he?he.clientWidth:0;b(xu("resize",N)),he&&he.clientWidth0){var he=h[0];he.label?N=he.label:he.xLabel?N=he.xLabel:ne>0&&he.index-1?h.split("\n"):h}function Hu(h){var b=h._xScale,N=h._yScale||h._scale,k=h._index,ne=h._datasetIndex,he=h._chart.getDatasetMeta(ne).controller,Me=he._getIndexScale(),Qe=he._getValueScale();return{xLabel:b?b.getLabelForIndex(k,ne):"",yLabel:N?N.getLabelForIndex(k,ne):"",label:Me?""+Me.getLabelForIndex(k,ne):"",value:Qe?""+Qe.getLabelForIndex(k,ne):"",index:k,datasetIndex:ne,x:h._model.x,y:h._model.y}}function zl(h){var b=qr.global;return{xPadding:h.xPadding,yPadding:h.yPadding,xAlign:h.xAlign,yAlign:h.yAlign,rtl:h.rtl,textDirection:h.textDirection,bodyFontColor:h.bodyFontColor,_bodyFontFamily:To(h.bodyFontFamily,b.defaultFontFamily),_bodyFontStyle:To(h.bodyFontStyle,b.defaultFontStyle),_bodyAlign:h.bodyAlign,bodyFontSize:To(h.bodyFontSize,b.defaultFontSize),bodySpacing:h.bodySpacing,titleFontColor:h.titleFontColor,_titleFontFamily:To(h.titleFontFamily,b.defaultFontFamily),_titleFontStyle:To(h.titleFontStyle,b.defaultFontStyle),titleFontSize:To(h.titleFontSize,b.defaultFontSize),_titleAlign:h.titleAlign,titleSpacing:h.titleSpacing,titleMarginBottom:h.titleMarginBottom,footerFontColor:h.footerFontColor,_footerFontFamily:To(h.footerFontFamily,b.defaultFontFamily),_footerFontStyle:To(h.footerFontStyle,b.defaultFontStyle),footerFontSize:To(h.footerFontSize,b.defaultFontSize),_footerAlign:h.footerAlign,footerSpacing:h.footerSpacing,footerMarginTop:h.footerMarginTop,caretSize:h.caretSize,cornerRadius:h.cornerRadius,backgroundColor:h.backgroundColor,opacity:0,legendColorBackground:h.multiKeyBackground,displayColors:h.displayColors,borderColor:h.borderColor,borderWidth:h.borderWidth}}function id(h,b){return"center"===b?h.x+h.width/2:"right"===b?h.x+h.width-h.xPadding:h.x+h.xPadding}function ec(h){return Hs([],Qs(h))}var Fc=nr.extend({initialize:function(){this._model=zl(this._options),this._lastActive=[]},getTitle:function(){var h=this,N=h._options.callbacks,k=N.beforeTitle.apply(h,arguments),ne=N.title.apply(h,arguments),he=N.afterTitle.apply(h,arguments),Me=[];return Me=Hs(Me,Qs(k)),Me=Hs(Me,Qs(ne)),Hs(Me,Qs(he))},getBeforeBody:function(){return ec(this._options.callbacks.beforeBody.apply(this,arguments))},getBody:function(h,b){var N=this,k=N._options.callbacks,ne=[];return Pe.each(h,function(he){var Me={before:[],lines:[],after:[]};Hs(Me.before,Qs(k.beforeLabel.call(N,he,b))),Hs(Me.lines,k.label.call(N,he,b)),Hs(Me.after,Qs(k.afterLabel.call(N,he,b))),ne.push(Me)}),ne},getAfterBody:function(){return ec(this._options.callbacks.afterBody.apply(this,arguments))},getFooter:function(){var h=this,b=h._options.callbacks,N=b.beforeFooter.apply(h,arguments),k=b.footer.apply(h,arguments),ne=b.afterFooter.apply(h,arguments),he=[];return he=Hs(he,Qs(N)),he=Hs(he,Qs(k)),Hs(he,Qs(ne))},update:function(h){var It,Cn,b=this,N=b._options,k=b._model,ne=b._model=zl(N),he=b._active,Me=b._data,Qe={xAlign:k.xAlign,yAlign:k.yAlign},Re={x:k.x,y:k.y},ft={width:k.width,height:k.height},wt={x:k.caretX,y:k.caretY};if(he.length){ne.opacity=1;var er=[],sr=[];wt=mi[N.position].call(b,he,b._eventPosition);var 
Dr=[];for(It=0,Cn=he.length;Itk.width&&(ne=k.width-b.width),ne<0&&(ne=0)),"top"===wt?he+=It:he-="bottom"===wt?b.height+It:b.height/2,"center"===wt?"left"===ft?ne+=It:"right"===ft&&(ne-=It):"left"===ft?ne-=Cn:"right"===ft&&(ne+=Cn),{x:ne,y:he}}(ne,ft=function sc(h,b){var N=h._chart.ctx,k=2*b.yPadding,ne=0,he=b.body,Me=he.reduce(function(sr,Dr){return sr+Dr.before.length+Dr.lines.length+Dr.after.length},0),Qe=b.title.length,Re=b.footer.length,ft=b.titleFontSize,wt=b.bodyFontSize,It=b.footerFontSize;k+=Qe*ft,k+=Qe?(Qe-1)*b.titleSpacing:0,k+=Qe?b.titleMarginBottom:0,k+=(Me+=b.beforeBody.length+b.afterBody.length)*wt,k+=Me?(Me-1)*b.bodySpacing:0,k+=Re?b.footerMarginTop:0,k+=Re*It,k+=Re?(Re-1)*b.footerSpacing:0;var Cn=0,er=function(sr){ne=Math.max(ne,N.measureText(sr).width+Cn)};return N.font=Pe.fontString(ft,b._titleFontStyle,b._titleFontFamily),Pe.each(b.title,er),N.font=Pe.fontString(wt,b._bodyFontStyle,b._bodyFontFamily),Pe.each(b.beforeBody.concat(b.afterBody),er),Cn=b.displayColors?wt+2:0,Pe.each(he,function(sr){Pe.each(sr.before,er),Pe.each(sr.lines,er),Pe.each(sr.after,er)}),Cn=0,N.font=Pe.fontString(It,b._footerFontStyle,b._footerFontFamily),Pe.each(b.footer,er),{width:ne+=2*b.xPadding,height:k}}(this,ne),Qe=function hu(h,b){var N=h._model,k=h._chart,ne=h._chart.chartArea,he="center",Me="center";N.yk.height-b.height&&(Me="bottom");var Qe,Re,ft,wt,It,Cn=(ne.left+ne.right)/2,er=(ne.top+ne.bottom)/2;"center"===Me?(Qe=function(Dr){return Dr<=Cn},Re=function(Dr){return Dr>Cn}):(Qe=function(Dr){return Dr<=b.width/2},Re=function(Dr){return Dr>=k.width-b.width/2}),ft=function(Dr){return Dr+b.width+N.caretSize+N.caretPadding>k.width},wt=function(Dr){return Dr-b.width-N.caretSize-N.caretPadding<0},It=function(Dr){return Dr<=er?"top":"bottom"},Qe(N.x)?(he="left",ft(N.x)&&(he="center",Me=It(N.y))):Re(N.x)&&(he="right",wt(N.x)&&(he="center",Me=It(N.y)));var sr=h._options;return{xAlign:sr.xAlign?sr.xAlign:he,yAlign:sr.yAlign?sr.yAlign:Me}}(this,ft),b._chart)}else ne.opacity=0;return ne.xAlign=Qe.xAlign,ne.yAlign=Qe.yAlign,ne.x=Re.x,ne.y=Re.y,ne.width=ft.width,ne.height=ft.height,ne.caretX=wt.x,ne.caretY=wt.y,b._model=ne,h&&N.custom&&N.custom.call(b,ne),b},drawCaret:function(h,b){var N=this._chart.ctx,ne=this.getCaretPosition(h,b,this._view);N.lineTo(ne.x1,ne.y1),N.lineTo(ne.x2,ne.y2),N.lineTo(ne.x3,ne.y3)},getCaretPosition:function(h,b,N){var k,ne,he,Me,Qe,Re,ft=N.caretSize,wt=N.cornerRadius,It=N.xAlign,Cn=N.yAlign,er=h.x,sr=h.y,Dr=b.width,oi=b.height;if("center"===Cn)Qe=sr+oi/2,"left"===It?(ne=(k=er)-ft,he=k,Me=Qe+ft,Re=Qe-ft):(ne=(k=er+Dr)+ft,he=k,Me=Qe-ft,Re=Qe+ft);else if("left"===It?(k=(ne=er+wt+ft)-ft,he=ne+ft):"right"===It?(k=(ne=er+Dr-wt-ft)-ft,he=ne+ft):(k=(ne=N.caretX)-ft,he=ne+ft),"top"===Cn)Qe=(Me=sr)-ft,Re=Me;else{Qe=(Me=sr+oi)+ft,Re=Me;var uo=he;he=k,k=uo}return{x1:k,x2:ne,x3:he,y1:Me,y2:Qe,y3:Re}},drawTitle:function(h,b,N){var he,Me,Qe,k=b.title,ne=k.length;if(ne){var Re=Ya(b.rtl,b.x,b.width);for(h.x=id(b,b._titleAlign),N.textAlign=Re.textAlign(b._titleAlign),N.textBaseline="middle",he=b.titleFontSize,Me=b.titleSpacing,N.fillStyle=b.titleFontColor,N.font=Pe.fontString(he,b._titleFontStyle,b._titleFontFamily),Qe=0;Qe0&&N.stroke()},draw:function(){var h=this._chart.ctx,b=this._view;if(0!==b.opacity){var 
N={width:b.width,height:b.height},k={x:b.x,y:b.y},ne=Math.abs(b.opacity<.001)?0:b.opacity;this._options.enabled&&(b.title.length||b.beforeBody.length||b.body.length||b.afterBody.length||b.footer.length)&&(h.save(),h.globalAlpha=ne,this.drawBackground(k,b,h,N),k.y+=b.yPadding,Pe.rtl.overrideTextDirection(h,b.textDirection),this.drawTitle(k,b,h),this.drawBody(k,b,h),this.drawFooter(k,b,h),Pe.rtl.restoreTextDirection(h,b.textDirection),h.restore())}},handleEvent:function(h){var k,b=this,N=b._options;return b._lastActive=b._lastActive||[],"mouseout"===h.type?b._active=[]:(b._active=b._chart.getElementsAtEventForMode(h,N.mode,N),N.reverse&&b._active.reverse()),(k=!Pe.arrayEquals(b._active,b._lastActive))&&(b._lastActive=b._active,(N.enabled||N.custom)&&(b._eventPosition={x:h.x,y:h.y},b.update(!0),b.pivot())),k}}),Lc=Fc;Lc.positioners=mi;var kl=Pe.valueOrDefault;function sl(){return Pe.merge(Object.create(null),[].slice.call(arguments),{merger:function(h,b,N,k){if("xAxes"===h||"yAxes"===h){var he,Me,Qe,ne=N[h].length;for(b[h]||(b[h]=[]),he=0;he=b[h].length&&b[h].push({}),Pe.merge(b[h][he],!b[h][he].type||Qe.type&&Qe.type!==b[h][he].type?[fi.getScaleDefaults(Me),Qe]:Qe)}else Pe._merger(h,b,N,k)}})}function ja(){return Pe.merge(Object.create(null),[].slice.call(arguments),{merger:function(h,b,N,k){var ne=b[h]||Object.create(null),he=N[h];"scales"===h?b[h]=sl(ne,he):"scale"===h?b[h]=Pe.merge(ne,[fi.getScaleDefaults(he.type),he]):Pe._merger(h,b,N,k)}})}function yt(h,b,N){var k,ne=function(he){return he.id===k};do{k=b+N++}while(Pe.findIndex(h,ne)>=0);return k}function Xe(h){return"top"===h||"bottom"===h}function Gt(h,b){return function(N,k){return N[h]===k[h]?N[b]-k[b]:N[h]-k[h]}}qr._set("global",{elements:{},events:["mousemove","mouseout","click","touchstart","touchmove"],hover:{onHover:null,mode:"nearest",intersect:!0,animationDuration:400},onClick:null,maintainAspectRatio:!0,responsive:!0,responsiveAnimationDuration:0});var An=function(h,b){return this.construct(h,b),this};Pe.extend(An.prototype,{construct:function(h,b){var N=this;b=function Q(h){var b=(h=h||Object.create(null)).data=h.data||{};return b.datasets=b.datasets||[],b.labels=b.labels||[],h.options=ja(qr.global,qr[h.type],h.options||{}),h}(b);var k=Je.acquireContext(h,b),ne=k&&k.canvas,he=ne&&ne.height,Me=ne&&ne.width;N.id=Pe.uid(),N.ctx=k,N.canvas=ne,N.config=b,N.width=Me,N.height=he,N.aspectRatio=he?Me/he:null,N.options=b.options,N._bufferedRender=!1,N._layers=[],N.chart=N,N.controller=N,An.instances[N.id]=N,Object.defineProperty(N,"data",{get:function(){return N.config.data},set:function(Qe){N.config.data=Qe}}),k&&ne?(N.initialize(),N.update()):console.error("Failed to create chart: can't acquire context from the given item")},initialize:function(){var h=this;return en.notify(h,"beforeInit"),Pe.retinaScale(h,h.options.devicePixelRatio),h.bindEvents(),h.options.responsive&&h.resize(!0),h.initToolTip(),en.notify(h,"afterInit"),h},clear:function(){return Pe.canvas.clear(this),this},stop:function(){return Ge.cancelAnimation(this),this},resize:function(h){var b=this,N=b.options,k=b.canvas,ne=N.maintainAspectRatio&&b.aspectRatio||null,he=Math.max(0,Math.floor(Pe.getMaximumWidth(k))),Me=Math.max(0,Math.floor(ne?he/ne:Pe.getMaximumHeight(k)));if((b.width!==he||b.height!==Me)&&(k.width=b.width=he,k.height=b.height=Me,k.style.width=he+"px",k.style.height=Me+"px",Pe.retinaScale(b,N.devicePixelRatio),!h)){var 
Qe={width:he,height:Me};en.notify(b,"resize",[Qe]),N.onResize&&N.onResize(b,Qe),b.stop(),b.update({duration:N.responsiveAnimationDuration})}},ensureScalesHaveIDs:function(){var h=this.options,b=h.scales||{},N=h.scale;Pe.each(b.xAxes,function(k,ne){k.id||(k.id=yt(b.xAxes,"x-axis-",ne))}),Pe.each(b.yAxes,function(k,ne){k.id||(k.id=yt(b.yAxes,"y-axis-",ne))}),N&&(N.id=N.id||"scale")},buildOrUpdateScales:function(){var h=this,b=h.options,N=h.scales||{},k=[],ne=Object.keys(N).reduce(function(he,Me){return he[Me]=!1,he},{});b.scales&&(k=k.concat((b.scales.xAxes||[]).map(function(he){return{options:he,dtype:"category",dposition:"bottom"}}),(b.scales.yAxes||[]).map(function(he){return{options:he,dtype:"linear",dposition:"left"}}))),b.scale&&k.push({options:b.scale,dtype:"radialLinear",isDefault:!0,dposition:"chartArea"}),Pe.each(k,function(he){var Me=he.options,Qe=Me.id,Re=kl(Me.type,he.dtype);Xe(Me.position)!==Xe(he.dposition)&&(Me.position=he.dposition),ne[Qe]=!0;var ft=null;if(Qe in N&&N[Qe].type===Re)(ft=N[Qe]).options=Me,ft.ctx=h.ctx,ft.chart=h;else{var wt=fi.getScaleConstructor(Re);if(!wt)return;ft=new wt({id:Qe,type:Re,options:Me,ctx:h.ctx,chart:h}),N[ft.id]=ft}ft.mergeTicksOptions(),he.isDefault&&(h.scale=ft)}),Pe.each(ne,function(he,Me){he||delete N[Me]}),h.scales=N,fi.addScalesToLayout(this)},buildOrUpdateControllers:function(){var k,ne,h=this,b=[],N=h.data.datasets;for(k=0,ne=N.length;k=0;--k)b.drawDataset(N[k],h);en.notify(b,"afterDatasetsDraw",[h])}},drawDataset:function(h,b){var k={meta:h,index:h.index,easingValue:b};!1!==en.notify(this,"beforeDatasetDraw",[k])&&(h.controller.draw(b),en.notify(this,"afterDatasetDraw",[k]))},_drawTooltip:function(h){var b=this,N=b.tooltip,k={tooltip:N,easingValue:h};!1!==en.notify(b,"beforeTooltipDraw",[k])&&(N.draw(),en.notify(b,"afterTooltipDraw",[k]))},getElementAtEvent:function(h){return vl.modes.single(this,h)},getElementsAtEvent:function(h){return vl.modes.label(this,h,{intersect:!0})},getElementsAtXAxis:function(h){return vl.modes["x-axis"](this,h,{intersect:!0})},getElementsAtEventForMode:function(h,b,N){var k=vl.modes[b];return"function"==typeof k?k(this,h,N):[]},getDatasetAtEvent:function(h){return vl.modes.dataset(this,h,{intersect:!0})},getDatasetMeta:function(h){var b=this,N=b.data.datasets[h];N._meta||(N._meta={});var k=N._meta[b.id];return k||(k=N._meta[b.id]={type:null,data:[],dataset:null,controller:null,hidden:null,xAxisID:null,yAxisID:null,order:N.order||0,index:h}),k},getVisibleDatasetCount:function(){for(var h=0,b=0,N=this.data.datasets.length;b3?N[2]-N[1]:N[1]-N[0];Math.abs(k)>1&&h!==Math.floor(h)&&(k=h-Math.floor(h));var ne=Pe.log10(Math.abs(k)),he="";if(0!==h)if(Math.max(Math.abs(N[0]),Math.abs(N[N.length-1]))<1e-4){var Qe=Pe.log10(Math.abs(h)),Re=Math.floor(Qe)-Math.floor(ne);Re=Math.max(Math.min(Re,20),0),he=h.toExponential(Re)}else{var ft=-1*Math.floor(ne);ft=Math.max(Math.min(ft,20),0),he=h.toFixed(ft)}else he="0";return he},logarithmic:function(h,b,N){var k=h/Math.pow(10,Math.floor(Pe.log10(h)));return 0===h?"0":1===k||2===k||5===k||0===b||b===N.length-1?h.toExponential():""}}},Qn=Pe.isArray,Gr=Pe.isNullOrUndef,Fr=Pe.valueOrDefault,Ui=Pe.valueAtIndexOrDefault;function Fa(h,b,N){var ft,k=h.getTicks().length,ne=Math.min(b,k-1),he=h.getPixelForTick(ne),Me=h._startPixel,Qe=h._endPixel;if(!(N&&(ft=1===k?Math.max(he-Me,Qe-he):0===b?(h.getPixelForTick(1)-he)/2:(he-h.getPixelForTick(ne-1))/2,he+=neQe+1e-6)))return he}function zo(h,b,N,k){var 
wt,It,Cn,er,sr,Dr,oi,uo,As,as,ma,Na,Pl,ne=N.length,he=[],Me=[],Qe=[],Re=0,ft=0;for(wt=0;wtb){for(he=0;he=he||k<=1||!h.isHorizontal()?h.labelRotation=ne:(Re=(Qe=h._getLabelSizes()).widest.width,ft=Qe.highest.height-Qe.highest.offset,wt=Math.min(h.maxWidth,h.chart.width-Re),Re+6>(It=b.offset?h.maxWidth/k:wt/(k-1))&&(It=wt/(k-(b.offset?.5:1)),Cn=h.maxHeight-$l(b.gridLines)-N.padding-xl(b.scaleLabel),er=Math.sqrt(Re*Re+ft*ft),Me=Pe.toDegrees(Math.min(Math.asin(Math.min((Qe.highest.height+6)/It,1)),Math.asin(Math.min(Cn/er,1))-Math.asin(ft/er))),Me=Math.max(ne,Math.min(he,Me))),h.labelRotation=Me)},afterCalculateTickRotation:function(){Pe.callback(this.options.afterCalculateTickRotation,[this])},beforeFit:function(){Pe.callback(this.options.beforeFit,[this])},fit:function(){var h=this,b=h.minSize={width:0,height:0},N=h.chart,k=h.options,ne=k.ticks,he=k.scaleLabel,Me=k.gridLines,Qe=h._isVisible(),Re="bottom"===k.position,ft=h.isHorizontal();if(ft?b.width=h.maxWidth:Qe&&(b.width=$l(Me)+xl(he)),ft?Qe&&(b.height=$l(Me)+xl(he)):b.height=h.maxHeight,ne.display&&Qe){var wt=Xc(ne),It=h._getLabelSizes(),Cn=It.first,er=It.last,sr=It.widest,Dr=It.highest,oi=.4*wt.minor.lineHeight,uo=ne.padding;if(ft){var As=0!==h.labelRotation,as=Pe.toRadians(h.labelRotation),ma=Math.cos(as),Na=Math.sin(as);b.height=Math.min(h.maxHeight,b.height+(Na*sr.width+ma*(Dr.height-(As?Dr.offset:0))+(As?0:oi))+uo);var Nl,Qu,il=h.getPixelForTick(0)-h.left,dl=h.right-h.getPixelForTick(h.getTicks().length-1);As?(Nl=Re?ma*Cn.width+Na*Cn.offset:Na*(Cn.height-Cn.offset),Qu=Re?Na*(er.height-er.offset):ma*er.width+Na*er.offset):(Nl=Cn.width/2,Qu=er.width/2),h.paddingLeft=Math.max((Nl-il)*h.width/(h.width-il),0)+3,h.paddingRight=Math.max((Qu-dl)*h.width/(h.width-dl),0)+3}else b.width=Math.min(h.maxWidth,b.width+(ne.mirror?0:sr.width+uo+oi)),h.paddingTop=Cn.height/2,h.paddingBottom=er.height/2}h.handleMargins(),ft?(h.width=h._length=N.width-h.margins.left-h.margins.right,h.height=b.height):(h.width=b.width,h.height=h._length=N.height-h.margins.top-h.margins.bottom)},handleMargins:function(){var h=this;h.margins&&(h.margins.left=Math.max(h.paddingLeft,h.margins.left),h.margins.top=Math.max(h.paddingTop,h.margins.top),h.margins.right=Math.max(h.paddingRight,h.margins.right),h.margins.bottom=Math.max(h.paddingBottom,h.margins.bottom))},afterFit:function(){Pe.callback(this.options.afterFit,[this])},isHorizontal:function(){var h=this.options.position;return"top"===h||"bottom"===h},isFullWidth:function(){return this.options.fullWidth},getRightValue:function(h){if(Gr(h))return NaN;if(("number"==typeof h||h instanceof Number)&&!isFinite(h))return NaN;if(h)if(this.isHorizontal()){if(void 0!==h.x)return this.getRightValue(h.x)}else if(void 0!==h.y)return this.getRightValue(h.y);return h},_convertTicksToLabels:function(h){var N,k,ne,b=this;for(b.ticks=h.map(function(he){return he.value}),b.beforeTickToLabelConversion(),N=b.convertTicksToLabels(h)||b.ticks,b.afterTickToLabelConversion(),k=0,ne=h.length;kk-1?null:b.getPixelForDecimal(h*ne+(N?ne/2:0))},getPixelForDecimal:function(h){var b=this;return b._reversePixels&&(h=1-h),b._startPixel+h*b._length},getDecimalForPixel:function(h){var b=(h-this._startPixel)/this._length;return this._reversePixels?1-b:b},getBasePixel:function(){return this.getPixelForValue(this.getBaseValue())},getBaseValue:function(){var h=this,b=h.min,N=h.max;return h.beginAtZero?0:b<0&&N<0?N:b>0&&N>0?b:0},_autoSkip:function(h){var 
ft,wt,It,Cn,b=this,N=b.options.ticks,k=b._length,ne=N.maxTicksLimit||k/b._tickSize()+1,he=N.major.enabled?function Wl(h){var N,k,b=[];for(N=0,k=h.length;Nne)return function Pa(h,b,N){var he,Me,k=0,ne=b[0];for(N=Math.ceil(N),he=0;hehe)return Qe;return Math.max(he,1)}(he,h,0,ne),Me>0){for(ft=0,wt=Me-1;ft1?(Re-Qe)/(Me-1):null)?0:Qe-Cn,Qe),fc(h,It,Re,Pe.isNullOrUndef(Cn)?h.length:Re+Cn),ad(h)}return fc(h,It),ad(h)},_tickSize:function(){var h=this,b=h.options.ticks,N=Pe.toRadians(h.labelRotation),k=Math.abs(Math.cos(N)),ne=Math.abs(Math.sin(N)),he=h._getLabelSizes(),Me=b.autoSkipPadding||0,Qe=he?he.widest.width+Me:0,Re=he?he.highest.height+Me:0;return h.isHorizontal()?Re*k>Qe*ne?Qe/k:Re/ne:Re*ne=0&&(he=Qe),void 0!==ne&&(Qe=b.indexOf(ne))>=0&&(Me=Qe),h.minIndex=he,h.maxIndex=Me,h.min=b[he],h.max=b[Me]},buildTicks:function(){var h=this,b=h._getLabels(),N=h.minIndex,k=h.maxIndex;h.ticks=0===N&&k===b.length-1?b:b.slice(N,k+1)},getLabelForIndex:function(h,b){var N=this,k=N.chart;return k.getDatasetMeta(b).controller._getValueScaleId()===N.id?N.getRightValue(k.data.datasets[b].data[h]):N._getLabels()[h]},_configure:function(){var h=this,b=h.options.offset,N=h.ticks;je.prototype._configure.call(h),h.isHorizontal()||(h._reversePixels=!h._reversePixels),N&&(h._startValue=h.minIndex-(b?.5:0),h._valueRange=Math.max(N.length-(b?0:1),1))},getPixelForValue:function(h,b,N){var ne,he,Me,k=this;return!Nt(b)&&!Nt(N)&&(h=k.chart.data.datasets[N].data[b]),Nt(h)||(ne=k.isHorizontal()?h.x:h.y),(void 0!==ne||void 0!==h&&isNaN(b))&&(he=k._getLabels(),h=Pe.valueOrDefault(ne,h),b=-1!==(Me=he.indexOf(h))?Me:b,isNaN(b)&&(b=h)),k.getPixelForDecimal((b-k._startValue)/k._valueRange)},getPixelForTick:function(h){var b=this.ticks;return h<0||h>b.length-1?null:this.getPixelForValue(b[h],h+this.minIndex)},getValueForPixel:function(h){var b=this,N=Math.round(b._startValue+b.getDecimalForPixel(h)*b._valueRange);return Math.min(Math.max(N,0),b.ticks.length-1)},getBasePixel:function(){return this.bottom}});tn._defaults={position:"bottom"};var Ri=Pe.isNullOrUndef;var Fs=je.extend({getRightValue:function(h){return"string"==typeof h?+h:je.prototype.getRightValue.call(this,h)},handleTickRangeOptions:function(){var h=this,N=h.options.ticks;if(N.beginAtZero){var k=Pe.sign(h.min),ne=Pe.sign(h.max);k<0&&ne<0?h.max=0:k>0&&ne>0&&(h.min=0)}var he=void 0!==N.min||void 0!==N.suggestedMin,Me=void 0!==N.max||void 0!==N.suggestedMax;void 0!==N.min?h.min=N.min:void 0!==N.suggestedMin&&(h.min=null===h.min?N.suggestedMin:Math.min(h.min,N.suggestedMin)),void 0!==N.max?h.max=N.max:void 0!==N.suggestedMax&&(h.max=null===h.max?N.suggestedMax:Math.max(h.max,N.suggestedMax)),he!==Me&&h.min>=h.max&&(he?h.max=h.min+1:h.min=h.max-1),h.min===h.max&&(h.max++,N.beginAtZero||h.min--)},getTickLimit:function(){var ne,h=this,b=h.options.ticks,N=b.stepSize,k=b.maxTicksLimit;return N?ne=Math.ceil(h.max/N)-Math.floor(h.min/N)+1:(ne=h._computeTickLimit(),k=k||11),k&&(ne=Math.min(k,ne)),ne},_computeTickLimit:function(){return Number.POSITIVE_INFINITY},handleDirectionalChanges:Pe.noop,buildTicks:function(){var h=this,N=h.options.ticks,k=h.getTickLimit(),ne={maxTicks:k=Math.max(2,k),min:N.min,max:N.max,precision:N.precision,stepSize:Pe.valueOrDefault(N.fixedStepSize,N.stepSize)},he=h.ticks=function fs(h,b){var 
er,sr,Dr,oi,N=[],ne=h.stepSize,he=ne||1,Me=h.maxTicks-1,Qe=h.min,Re=h.max,ft=h.precision,wt=b.min,It=b.max,Cn=Pe.niceNum((It-wt)/Me/he)*he;if(Cn<1e-14&&Ri(Qe)&&Ri(Re))return[wt,It];(oi=Math.ceil(It/Cn)-Math.floor(wt/Cn))>Me&&(Cn=Pe.niceNum(oi*Cn/Me/he)*he),ne||Ri(ft)?er=Math.pow(10,Pe._decimalPlaces(Cn)):(er=Math.pow(10,ft),Cn=Math.ceil(Cn*er)/er),sr=Math.floor(wt/Cn)*Cn,Dr=Math.ceil(It/Cn)*Cn,ne&&(!Ri(Qe)&&Pe.almostWhole(Qe/Cn,Cn/1e3)&&(sr=Qe),!Ri(Re)&&Pe.almostWhole(Re/Cn,Cn/1e3)&&(Dr=Re)),oi=Pe.almostEquals(oi=(Dr-sr)/Cn,Math.round(oi),Cn/1e3)?Math.round(oi):Math.ceil(oi),sr=Math.round(sr*er)/er,Dr=Math.round(Dr*er)/er,N.push(Ri(Qe)?sr:Qe);for(var uo=1;uob.length-1?null:this.getPixelForValue(b[h])}});rn._defaults=Ra;var le=Pe.valueOrDefault,ae=Pe.math.log10;var Ve={position:"left",ticks:{callback:Io.formatters.logarithmic}};function st(h,b){return Pe.isFinite(h)&&h>=0?h:b}var zt=je.extend({determineDataLimits:function(){var Me,Qe,Re,ft,wt,It,h=this,b=h.options,N=h.chart,k=N.data.datasets,ne=h.isHorizontal();function he(oi){return ne?oi.xAxisID===h.id:oi.yAxisID===h.id}h.min=Number.POSITIVE_INFINITY,h.max=Number.NEGATIVE_INFINITY,h.minNotZero=Number.POSITIVE_INFINITY;var Cn=b.stacked;if(void 0===Cn)for(Me=0;Me0){var uo=Pe.min(oi),As=Pe.max(oi);h.min=Math.min(h.min,uo),h.max=Math.max(h.max,As)}})}else for(Me=0;Me0?h.min:h.max<1?Math.pow(10,Math.floor(ae(h.max))):1)},buildTicks:function(){var h=this,b=h.options.ticks,N=!h.isHorizontal(),k={min:st(b.min),max:st(b.max)},ne=h.ticks=function De(h,b){var Me,Qe,N=[],k=le(h.min,Math.pow(10,Math.floor(ae(b.min)))),ne=Math.floor(ae(b.max)),he=Math.ceil(b.max/Math.pow(10,ne));0===k?(Me=Math.floor(ae(b.minNotZero)),Qe=Math.floor(b.minNotZero/Math.pow(10,Me)),N.push(k),k=Qe*Math.pow(10,Me)):(Me=Math.floor(ae(k)),Qe=Math.floor(k/Math.pow(10,Me)));var Re=Me<0?Math.pow(10,Math.abs(Me)):1;do{N.push(k),10==++Qe&&(Qe=1,Re=++Me>=0?1:Re),k=Math.round(Qe*Math.pow(10,Me)*Re)/Re}while(Meb.length-1?null:this.getPixelForValue(b[h])},_getFirstTickValue:function(h){var b=Math.floor(ae(h));return Math.floor(h/Math.pow(10,b))*Math.pow(10,b)},_configure:function(){var h=this,b=h.min,N=0;je.prototype._configure.call(h),0===b&&(b=h._getFirstTickValue(h.minNotZero),N=le(h.options.ticks.fontSize,qr.global.defaultFontSize)/h._length),h._startValue=ae(b),h._valueOffset=N,h._valueRange=(ae(h.max)-ae(b))/(1-N)},getPixelForValue:function(h){var b=this,N=0;return(h=+b.getRightValue(h))>b.min&&h>0&&(N=(ae(h)-b._startValue)/b._valueRange+b._valueOffset),b.getPixelForDecimal(N)},getValueForPixel:function(h){var b=this,N=b.getDecimalForPixel(h);return 0===N&&0===b.min?0:Math.pow(10,b._startValue+(N-b._valueOffset)*b._valueRange)}});zt._defaults=Ve;var Gn=Pe.valueOrDefault,Er=Pe.valueAtIndexOrDefault,Nr=Pe.options.resolve,Mi={display:!0,animate:!0,position:"chartArea",angleLines:{display:!0,color:"rgba(0,0,0,0.1)",lineWidth:1,borderDash:[],borderDashOffset:0},gridLines:{circular:!1},ticks:{showLabelBackdrop:!0,backdropColor:"rgba(255,255,255,0.75)",backdropPaddingY:2,backdropPaddingX:2,callback:Io.formatters.linear},pointLabels:{display:!0,fontSize:10,callback:function(h){return h}}};function ao(h){var b=h.ticks;return b.display&&h.display?Gn(b.fontSize,qr.global.defaultFontSize)+2*b.backdropPaddingY:0}function Jo(h,b,N){return Pe.isArray(N)?{w:Pe.longestText(h,h.font,N),h:N.length*b}:{w:h.measureText(N).width,h:b}}function rs(h,b,N,k,ne){return h===k||h===ne?{start:b-N/2,end:b+N/2}:hne?{start:b-N,end:b}:{start:b,end:b+N}}function Ps(h){return 
0===h||180===h?"center":h<180?"left":"right"}function Ul(h,b,N,k){var he,Me,ne=N.y+k/2;if(Pe.isArray(b))for(he=0,Me=b.length;he270||h<90)&&(N.y-=b.h)}function Rc(h){return Pe.isNumber(h)?h:0}var fu=Fs.extend({setDimensions:function(){var h=this;h.width=h.maxWidth,h.height=h.maxHeight,h.paddingTop=ao(h.options)/2,h.xCenter=Math.floor(h.width/2),h.yCenter=Math.floor((h.height-h.paddingTop)/2),h.drawingArea=Math.min(h.height-h.paddingTop,h.width)/2},determineDataLimits:function(){var h=this,b=h.chart,N=Number.POSITIVE_INFINITY,k=Number.NEGATIVE_INFINITY;Pe.each(b.data.datasets,function(ne,he){if(b.isDatasetVisible(he)){var Me=b.getDatasetMeta(he);Pe.each(ne.data,function(Qe,Re){var ft=+h.getRightValue(Qe);isNaN(ft)||Me.data[Re].hidden||(N=Math.min(ft,N),k=Math.max(ft,k))})}}),h.min=N===Number.POSITIVE_INFINITY?0:N,h.max=k===Number.NEGATIVE_INFINITY?0:k,h.handleTickRangeOptions()},_computeTickLimit:function(){return Math.ceil(this.drawingArea/ao(this.options))},convertTicksToLabels:function(){var h=this;Fs.prototype.convertTicksToLabels.call(h),h.pointLabels=h.chart.data.labels.map(function(){var b=Pe.callback(h.options.pointLabels.callback,arguments,h);return b||0===b?b:""})},getLabelForIndex:function(h,b){return+this.getRightValue(this.chart.data.datasets[b].data[h])},fit:function(){var h=this,b=h.options;b.display&&b.pointLabels.display?function ys(h){var ne,he,Me,b=Pe.options._parseFont(h.options.pointLabels),N={l:0,r:h.width,t:0,b:h.height-h.paddingTop},k={};h.ctx.font=b.string,h._pointLabelSizes=[];var Qe=h.chart.data.labels.length;for(ne=0;neN.r&&(N.r=wt.end,k.r=Re),It.startN.b&&(N.b=It.end,k.b=Re)}h.setReductions(h.drawingArea,N,k)}(h):h.setCenterPoint(0,0,0,0)},setReductions:function(h,b,N){var k=this,ne=b.l/Math.sin(N.l),he=Math.max(b.r-k.width,0)/Math.sin(N.r),Me=-b.t/Math.cos(N.t),Qe=-Math.max(b.b-(k.height-k.paddingTop),0)/Math.cos(N.b);ne=Rc(ne),he=Rc(he),Me=Rc(Me),Qe=Rc(Qe),k.drawingArea=Math.min(Math.floor(h-(ne+he)/2),Math.floor(h-(Me+Qe)/2)),k.setCenterPoint(ne,he,Me,Qe)},setCenterPoint:function(h,b,N,k){var ne=this,Qe=N+ne.drawingArea,Re=ne.height-ne.paddingTop-k-ne.drawingArea;ne.xCenter=Math.floor((h+ne.drawingArea+(ne.width-b-ne.drawingArea))/2+ne.left),ne.yCenter=Math.floor((Qe+Re)/2+ne.top+ne.paddingTop)},getIndexAngle:function(h){var b=this.chart,he=(h*(360/b.data.labels.length)+((b.options||{}).startAngle||0))%360;return(he<0?he+360:he)*Math.PI*2/360},getDistanceFromCenterForValue:function(h){var b=this;if(Pe.isNullOrUndef(h))return NaN;var N=b.drawingArea/(b.max-b.min);return b.options.ticks.reverse?(b.max-h)*N:(h-b.min)*N},getPointPosition:function(h,b){var N=this,k=N.getIndexAngle(h)-Math.PI/2;return{x:Math.cos(k)*b+N.xCenter,y:Math.sin(k)*b+N.yCenter}},getPointPositionForValue:function(h,b){return this.getPointPosition(h,this.getDistanceFromCenterForValue(b))},getBasePosition:function(h){var b=this,N=b.min,k=b.max;return b.getPointPositionForValue(h||0,b.beginAtZero?0:N<0&&k<0?k:N>0&&k>0?N:0)},_drawGrid:function(){var Qe,Re,ft,h=this,b=h.ctx,N=h.options,k=N.gridLines,ne=N.angleLines,he=Gn(ne.lineWidth,k.lineWidth),Me=Gn(ne.color,k.color);if(N.pointLabels.display&&function mu(h){var b=h.ctx,N=h.options,k=N.pointLabels,ne=ao(N),he=h.getDistanceFromCenterForValue(N.ticks.reverse?h.min:h.max),Me=Pe.options._parseFont(k);b.save(),b.font=Me.string,b.textBaseline="middle";for(var Qe=h.chart.data.labels.length-1;Qe>=0;Qe--){var ft=h.getPointPosition(Qe,he+(0===Qe?ne/2:0)+5),wt=Er(k.fontColor,Qe,qr.global.defaultFontColor);b.fillStyle=wt;var 
It=h.getIndexAngle(Qe),Cn=Pe.toDegrees(It);b.textAlign=Ps(Cn),eu(Cn,h._pointLabelSizes[Qe],ft),Ul(b,h.pointLabels[Qe],ft,Me.lineHeight)}b.restore()}(h),k.display&&Pe.each(h.ticks,function(wt,It){0!==It&&(Re=h.getDistanceFromCenterForValue(h.ticksAsNumbers[It]),function wu(h,b,N,k){var ft,ne=h.ctx,he=b.circular,Me=h.chart.data.labels.length,Qe=Er(b.color,k-1),Re=Er(b.lineWidth,k-1);if((he||Me)&&Qe&&Re){if(ne.save(),ne.strokeStyle=Qe,ne.lineWidth=Re,ne.setLineDash&&(ne.setLineDash(b.borderDash||[]),ne.lineDashOffset=b.borderDashOffset||0),ne.beginPath(),he)ne.arc(h.xCenter,h.yCenter,N,0,2*Math.PI);else{ft=h.getPointPosition(0,N),ne.moveTo(ft.x,ft.y);for(var wt=1;wt=0;Qe--)Re=h.getDistanceFromCenterForValue(N.ticks.reverse?h.min:h.max),ft=h.getPointPosition(Qe,Re),b.beginPath(),b.moveTo(h.xCenter,h.yCenter),b.lineTo(ft.x,ft.y),b.stroke();b.restore()}},_drawLabels:function(){var h=this,b=h.ctx,k=h.options.ticks;if(k.display){var Qe,Re,ne=h.getIndexAngle(0),he=Pe.options._parseFont(k),Me=Gn(k.fontColor,qr.global.defaultFontColor);b.save(),b.font=he.string,b.translate(h.xCenter,h.yCenter),b.rotate(ne),b.textAlign="center",b.textBaseline="middle",Pe.each(h.ticks,function(ft,wt){0===wt&&!k.reverse||(Qe=h.getDistanceFromCenterForValue(h.ticksAsNumbers[wt]),k.showLabelBackdrop&&(Re=b.measureText(ft).width,b.fillStyle=k.backdropColor,b.fillRect(-Re/2-k.backdropPaddingX,-Qe-he.size/2-k.backdropPaddingY,Re+2*k.backdropPaddingX,he.size+2*k.backdropPaddingY)),b.fillStyle=Me,b.fillText(ft,0,-Qe))}),b.restore()}},_drawTitle:Pe.noop});fu._defaults=Mi;var $c=Pe._deprecated,pu=Pe.options.resolve,vc=Pe.valueOrDefault,La=Number.MIN_SAFE_INTEGER||-9007199254740991,al=Number.MAX_SAFE_INTEGER||9007199254740991,rl={millisecond:{common:!0,size:1,steps:1e3},second:{common:!0,size:1e3,steps:60},minute:{common:!0,size:6e4,steps:60},hour:{common:!0,size:36e5,steps:24},day:{common:!0,size:864e5,steps:30},week:{common:!1,size:6048e5,steps:4},month:{common:!0,size:2628e6,steps:12},quarter:{common:!1,size:7884e6,steps:4},year:{common:!0,size:3154e7}},xa=Object.keys(rl);function Tu(h,b){return h-b}function Pu(h){return Pe.valueOrDefault(h.time.min,h.ticks.min)}function za(h){return Pe.valueOrDefault(h.time.max,h.ticks.max)}function Cu(h,b,N,k){var ne=function Os(h,b,N){for(var he,Me,Qe,k=0,ne=h.length-1;k>=0&&k<=ne;){if(Qe=h[he=k+ne>>1],!(Me=h[he-1]||null))return{lo:null,hi:Qe};if(Qe[b]N))return{lo:Me,hi:Qe};ne=he-1}}return{lo:Qe,hi:null}}(h,b,N),he=ne.lo?ne.hi?ne.lo:h[h.length-2]:h[0],Me=ne.lo?ne.hi?ne.hi:h[h.length-1]:h[1],Qe=Me[b]-he[b];return he[k]+(Me[k]-he[k])*(Qe?(N-he[b])/Qe:0)}function ld(h,b){var N=h._adapter,k=h.options.time,ne=k.parser,he=ne||k.format,Me=b;return"function"==typeof ne&&(Me=ne(Me)),Pe.isFinite(Me)||(Me="string"==typeof he?N.parse(Me,he):N.parse(Me)),null!==Me?+Me:(!ne&&"function"==typeof he&&(Me=he(b),Pe.isFinite(Me)||(Me=N.parse(Me))),Me)}function Hc(h,b){if(Pe.isNullOrUndef(b))return null;var N=h.options.time,k=ld(h,h.getRightValue(b));return null===k||N.round&&(k=+h._adapter.startOf(k,N.round)),k}function Vu(h,b,N,k){var he,Me,ne=xa.length;for(he=xa.indexOf(h);he=0&&(b[Re].major=!0);return b}(h,k,ne,N):k}var Tp=je.extend({initialize:function(){this.mergeTicksOptions(),je.prototype.initialize.call(this)},update:function(){var h=this,b=h.options,N=b.time||(b.time={}),k=h._adapter=new Go._date(b.adapters.date);return $c("time scale",N.format,"time.format","time.parser"),$c("time scale",N.min,"time.min","ticks.min"),$c("time 
scale",N.max,"time.max","ticks.max"),Pe.mergeIf(N.displayFormats,k.formats()),je.prototype.update.apply(h,arguments)},getRightValue:function(h){return h&&void 0!==h.t&&(h=h.t),je.prototype.getRightValue.call(this,h)},determineDataLimits:function(){var wt,It,Cn,er,sr,Dr,oi,h=this,b=h.chart,N=h._adapter,k=h.options,ne=k.time.unit||"day",he=al,Me=La,Qe=[],Re=[],ft=[],uo=h._getLabels();for(wt=0,Cn=uo.length;wt1?function En(h){var k,ne,he,b={},N=[];for(k=0,ne=h.length;k1e5*Re)throw b+" and "+N+" are too far apart with stepSize of "+Re+" "+Qe;for(Cn=wt;Cn=b&&er<=N&&Qe.push(er);return h.min=b,h.max=N,h._unit=he.unit||(ne.autoSkip?Vu(he.minUnit,h.min,h.max,Re):function ud(h,b,N,k,ne){var he,Me;for(he=xa.length-1;he>=xa.indexOf(N);he--)if(rl[Me=xa[he]].common&&h._adapter.diff(ne,k,Me)>=b-1)return Me;return xa[N?xa.indexOf(N):0]}(h,Qe.length,he.minUnit,h.min,h.max)),h._majorUnit=ne.major.enabled&&"year"!==h._unit?function md(h){for(var b=xa.indexOf(h)+1,N=xa.length;bb&&ft=0&&h0?Qe:1}});Tp._defaults={position:"bottom",distribution:"linear",bounds:"data",adapters:{},time:{parser:!1,unit:!1,round:!1,displayFormat:!1,isoWeekday:!1,minUnit:"millisecond",displayFormats:{}},ticks:{autoSkip:!1,source:"auto",major:{enabled:!1}}};var Hd={category:tn,linear:rn,logarithmic:zt,radialLinear:fu,time:Tp},Bf={datetime:"MMM D, YYYY, h:mm:ss a",millisecond:"h:mm:ss.SSS a",second:"h:mm:ss a",minute:"h:mm a",hour:"hA",day:"MMM D",week:"ll",month:"MMM YYYY",quarter:"[Q]Q - YYYY",year:"YYYY"};Go._date.override("function"==typeof r?{_id:"moment",formats:function(){return Bf},parse:function(h,b){return"string"==typeof h&&"string"==typeof b?h=r(h,b):h instanceof r||(h=r(h)),h.isValid()?h.valueOf():null},format:function(h,b){return r(h).format(b)},add:function(h,b,N){return r(h).add(b,N).valueOf()},diff:function(h,b,N){return r(h).diff(r(b),N)},startOf:function(h,b,N){return h=r(h),"isoWeek"===b?h.isoWeekday(N).valueOf():h.startOf(b).valueOf()},endOf:function(h,b){return r(h).endOf(b).valueOf()},_create:function(h){return r(h)}}:{}),qr._set("global",{plugins:{filler:{propagate:!0}}});var gd={dataset:function(h){var b=h.fill,N=h.chart,k=N.getDatasetMeta(b),he=k&&N.isDatasetVisible(b)&&k.dataset._children||[],Me=he.length||0;return Me?function(Qe,Re){return Re=N)&&he;switch(ne){case"bottom":return"start";case"top":return"end";case"zero":return"origin";case"origin":case"start":case"end":return ne;default:return!1}}function _u(h){return(h.el._scale||{}).getPointPositionForValue?function xf(h){var Me,Qe,Re,ft,wt,b=h.el._scale,N=b.options,k=b.chart.data.labels.length,ne=h.fill,he=[];if(!k)return null;for(Qe=N.ticks.reverse?b.min:b.max,Re=b.getPointPositionForValue(0,Me=N.ticks.reverse?b.max:b.min),ft=0;ft0;--he)h.arc(Me,Qe,Re,N[he].angle,N[he-1].angle,!0);return}for(h.lineTo(N[ne-1].x,N[ne-1].y),he=ne-1;he>0;--he)Pe.canvas.lineTo(h,N[he],N[he-1],!0)}}function Ne(h,b,N,k,ne,he){var Cn,er,sr,Dr,oi,uo,As,as,Me=b.length,Qe=k.spanGaps,Re=[],ft=[],wt=0,It=0;for(h.beginPath(),Cn=0,er=Me;Cn=0;--ne)(k=b[ne].$filler)&&k.visible&&(Qe=(he=k.el)._children||[],ft=(Me=he._view).backgroundColor||qr.global.defaultColor,(Re=k.mapper)&&ft&&Qe.length&&(Pe.canvas.clipArea(N,h.chartArea),Ne(N,Qe,Re,Me,ft,he._loop),Pe.canvas.unclipArea(N)))}},Ye=Pe.rtl.getRtlAdapter,Mt=Pe.noop,un=Pe.valueOrDefault;function Mn(h,b){return h.usePointStyle&&h.boxWidth>b?b:h.boxWidth}qr._set("global",{legend:{display:!0,position:"top",align:"center",fullWidth:!0,reverse:!1,weight:1e3,onClick:function(h,b){var 
N=b.datasetIndex,k=this.chart,ne=k.getDatasetMeta(N);ne.hidden=null===ne.hidden?!k.data.datasets[N].hidden:null,k.update()},onHover:null,onLeave:null,labels:{boxWidth:40,padding:10,generateLabels:function(h){var b=h.data.datasets,N=h.options.legend||{},k=N.labels&&N.labels.usePointStyle;return h._getSortedDatasetMetas().map(function(ne){var he=ne.controller.getStyle(k?0:void 0);return{text:b[ne.index].label,fillStyle:he.backgroundColor,hidden:!h.isDatasetVisible(ne.index),lineCap:he.borderCapStyle,lineDash:he.borderDash,lineDashOffset:he.borderDashOffset,lineJoin:he.borderJoinStyle,lineWidth:he.borderWidth,strokeStyle:he.borderColor,pointStyle:he.pointStyle,rotation:he.rotation,datasetIndex:ne.index}},this)}}},legendCallback:function(h){var k,ne,he,b=document.createElement("ul"),N=h.data.datasets;for(b.setAttribute("class",h.id+"-legend"),k=0,ne=N.length;kRe.width)&&(It+=Me+N.padding,wt[wt.length-(as>0?0:1)]=0),Qe[as]={left:0,top:0,width:Na,height:Me},wt[wt.length-1]+=Na+N.padding}),Re.height+=It}else{var Cn=N.padding,er=h.columnWidths=[],sr=h.columnHeights=[],Dr=N.padding,oi=0,uo=0;Pe.each(h.legendItems,function(As,as){var Na=Mn(N,Me)+Me/2+ne.measureText(As.text).width;as>0&&uo+Me+2*Cn>Re.height&&(Dr+=oi+N.padding,er.push(oi),sr.push(uo),oi=0,uo=0),oi=Math.max(oi,Na),uo+=Me+Cn,Qe[as]={left:0,top:0,width:Na,height:Me}}),Dr+=oi,er.push(oi),sr.push(uo),Re.width+=Dr}h.width=Re.width,h.height=Re.height}else h.width=Re.width=h.height=Re.height=0},afterFit:Mt,isHorizontal:function(){return"top"===this.options.position||"bottom"===this.options.position},draw:function(){var h=this,b=h.options,N=b.labels,k=qr.global,ne=k.defaultColor,he=k.elements.line,Me=h.height,Qe=h.columnHeights,Re=h.width,ft=h.lineWidths;if(b.display){var Dr,wt=Ye(b.rtl,h.left,h.minSize.width),It=h.ctx,Cn=un(N.fontColor,k.defaultFontColor),er=Pe.options._parseFont(N),sr=er.size;It.textAlign=wt.textAlign("left"),It.textBaseline="middle",It.lineWidth=.5,It.strokeStyle=Cn,It.fillStyle=Cn,It.font=er.string;var oi=Mn(N,sr),uo=h.legendHitBoxes,ma=function(il,dl){switch(b.align){case"start":return N.padding;case"end":return il-dl;default:return(il-dl+N.padding)/2}},Na=h.isHorizontal();Dr=Na?{x:h.left+ma(Re,ft[0]),y:h.top+N.padding,line:0}:{x:h.left+N.padding,y:h.top+ma(Me,Qe[0]),line:0},Pe.rtl.overrideTextDirection(h.ctx,b.textDirection);var Pl=sr+N.padding;Pe.each(h.legendItems,function(il,dl){var Nl=It.measureText(il.text).width,Qu=oi+sr/2+Nl,ac=Dr.x,wa=Dr.y;wt.setWidth(h.minSize.width),Na?dl>0&&ac+Qu+N.padding>h.left+h.minSize.width&&(wa=Dr.y+=Pl,Dr.line++,ac=Dr.x=h.left+ma(Re,ft[Dr.line])):dl>0&&wa+Pl>h.top+h.minSize.height&&(ac=Dr.x=ac+h.columnWidths[Dr.line]+N.padding,Dr.line++,wa=Dr.y=h.top+ma(Me,Qe[Dr.line]));var nc=wt.x(ac);(function(il,dl,Nl){if(!(isNaN(oi)||oi<=0)){It.save();var Qu=un(Nl.lineWidth,he.borderWidth);if(It.fillStyle=un(Nl.fillStyle,ne),It.lineCap=un(Nl.lineCap,he.borderCapStyle),It.lineDashOffset=un(Nl.lineDashOffset,he.borderDashOffset),It.lineJoin=un(Nl.lineJoin,he.borderJoinStyle),It.lineWidth=Qu,It.strokeStyle=un(Nl.strokeStyle,ne),It.setLineDash&&It.setLineDash(un(Nl.lineDash,he.borderDash)),N&&N.usePointStyle){var ac=oi*Math.SQRT2/2,wa=wt.xPlus(il,oi/2);Pe.canvas.drawPoint(It,Nl.pointStyle,ac,wa,dl+sr/2,Nl.rotation)}else It.fillRect(wt.leftForLtr(il,oi),dl,oi,sr),0!==Qu&&It.strokeRect(wt.leftForLtr(il,oi),dl,oi,sr);It.restore()}})(nc,wa,il),uo[dl].left=wt.leftForLtr(nc,uo[dl].width),uo[dl].top=wa,function(il,dl,Nl,Qu){var 
ac=sr/2,wa=wt.xPlus(il,oi+ac),nc=dl+ac;It.fillText(Nl.text,wa,nc),Nl.hidden&&(It.beginPath(),It.lineWidth=2,It.moveTo(wa,nc),It.lineTo(wt.xPlus(wa,Qu),nc),It.stroke())}(nc,wa,il,Nl),Na?Dr.x+=Qu+N.padding:Dr.y+=Pl}),Pe.rtl.restoreTextDirection(h.ctx,b.textDirection)}},_getLegendItemAt:function(h,b){var k,ne,he,N=this;if(h>=N.left&&h<=N.right&&b>=N.top&&b<=N.bottom)for(he=N.legendHitBoxes,k=0;k=(ne=he[k]).left&&h<=ne.left+ne.width&&b>=ne.top&&b<=ne.top+ne.height)return N.legendItems[k];return null},handleEvent:function(h){var ne,b=this,N=b.options,k="mouseup"===h.type?"click":h.type;if("mousemove"===k){if(!N.onHover&&!N.onLeave)return}else{if("click"!==k)return;if(!N.onClick)return}ne=b._getLegendItemAt(h.x,h.y),"click"===k?ne&&N.onClick&&N.onClick.call(b,h.native,ne):(N.onLeave&&ne!==b._hoveredItem&&(b._hoveredItem&&N.onLeave.call(b,h.native,b._hoveredItem),b._hoveredItem=ne),N.onHover&&ne&&N.onHover.call(b,h.native,ne))}});function zi(h,b){var N=new ni({ctx:h.ctx,options:b,chart:h});Xl.configure(h,N,b),Xl.addBox(h,N),h.legend=N}var Wo={id:"legend",_element:ni,beforeInit:function(h){var b=h.options.legend;b&&zi(h,b)},beforeUpdate:function(h){var b=h.options.legend,N=h.legend;b?(Pe.mergeIf(b,qr.global.legend),N?(Xl.configure(h,N,b),N.options=b):zi(h,b)):N&&(Xl.removeBox(h,N),delete h.legend)},afterEvent:function(h,b){var N=h.legend;N&&N.handleEvent(b)}},Qo=Pe.noop;qr._set("global",{title:{display:!1,fontStyle:"bold",fullWidth:!0,padding:10,position:"top",text:"",weight:2e3}});var ya=nr.extend({initialize:function(h){Pe.extend(this,h),this.legendHitBoxes=[]},beforeUpdate:Qo,update:function(h,b,N){var k=this;return k.beforeUpdate(),k.maxWidth=h,k.maxHeight=b,k.margins=N,k.beforeSetDimensions(),k.setDimensions(),k.afterSetDimensions(),k.beforeBuildLabels(),k.buildLabels(),k.afterBuildLabels(),k.beforeFit(),k.fit(),k.afterFit(),k.afterUpdate(),k.minSize},afterUpdate:Qo,beforeSetDimensions:Qo,setDimensions:function(){var h=this;h.isHorizontal()?(h.width=h.maxWidth,h.left=0,h.right=h.width):(h.height=h.maxHeight,h.top=0,h.bottom=h.height),h.paddingLeft=0,h.paddingTop=0,h.paddingRight=0,h.paddingBottom=0,h.minSize={width:0,height:0}},afterSetDimensions:Qo,beforeBuildLabels:Qo,buildLabels:Qo,afterBuildLabels:Qo,beforeFit:Qo,fit:function(){var he,h=this,b=h.options,N=h.minSize={},k=h.isHorizontal();b.display?(he=(Pe.isArray(b.text)?b.text.length:1)*Pe.options._parseFont(b).lineHeight+2*b.padding,h.width=N.width=k?h.maxWidth:he,h.height=N.height=k?he:h.maxHeight):h.width=N.width=h.height=N.height=0},afterFit:Qo,isHorizontal:function(){var h=this.options.position;return"top"===h||"bottom"===h},draw:function(){var h=this,b=h.ctx,N=h.options;if(N.display){var It,Cn,er,k=Pe.options._parseFont(N),ne=k.lineHeight,he=ne/2+N.padding,Me=0,Qe=h.top,Re=h.left,ft=h.bottom,wt=h.right;b.fillStyle=Pe.valueOrDefault(N.fontColor,qr.global.defaultFontColor),b.font=k.string,h.isHorizontal()?(Cn=Re+(wt-Re)/2,er=Qe+he,It=wt-Re):(Cn="left"===N.position?Re+he:wt-he,er=Qe+(ft-Qe)/2,It=ft-Qe,Me=Math.PI*("left"===N.position?-.5:.5)),b.save(),b.translate(Cn,er),b.rotate(Me),b.textAlign="center",b.textBaseline="middle";var sr=N.text;if(Pe.isArray(sr))for(var Dr=0,oi=0;oi=0;Me--){var Qe=k[Me];if(ne(Qe))return Qe}},Pe.isNumber=function(k){return!isNaN(parseFloat(k))&&isFinite(k)},Pe.almostEquals=function(k,ne,he){return Math.abs(k-ne)=k},Pe.max=function(k){return k.reduce(function(ne,he){return isNaN(he)?ne:Math.max(ne,he)},Number.NEGATIVE_INFINITY)},Pe.min=function(k){return k.reduce(function(ne,he){return 
isNaN(he)?ne:Math.min(ne,he)},Number.POSITIVE_INFINITY)},Pe.sign=Math.sign?function(k){return Math.sign(k)}:function(k){return 0==(k=+k)||isNaN(k)?k:k>0?1:-1},Pe.toRadians=function(k){return k*(Math.PI/180)},Pe.toDegrees=function(k){return k*(180/Math.PI)},Pe._decimalPlaces=function(k){if(Pe.isFinite(k)){for(var ne=1,he=0;Math.round(k*ne)/ne!==k;)ne*=10,he++;return he}},Pe.getAngleFromPoint=function(k,ne){var he=ne.x-k.x,Me=ne.y-k.y,Qe=Math.sqrt(he*he+Me*Me),Re=Math.atan2(Me,he);return Re<-.5*Math.PI&&(Re+=2*Math.PI),{angle:Re,distance:Qe}},Pe.distanceBetweenPoints=function(k,ne){return Math.sqrt(Math.pow(ne.x-k.x,2)+Math.pow(ne.y-k.y,2))},Pe.aliasPixel=function(k){return k%2==0?0:.5},Pe._alignPixel=function(k,ne,he){var Me=k.currentDevicePixelRatio,Qe=he/2;return Math.round((ne-Qe)*Me)/Me+Qe},Pe.splineCurve=function(k,ne,he,Me){var Qe=k.skip?ne:k,Re=ne,ft=he.skip?ne:he,wt=Math.sqrt(Math.pow(Re.x-Qe.x,2)+Math.pow(Re.y-Qe.y,2)),It=Math.sqrt(Math.pow(ft.x-Re.x,2)+Math.pow(ft.y-Re.y,2)),Cn=wt/(wt+It),er=It/(wt+It),sr=Me*(Cn=isNaN(Cn)?0:Cn),Dr=Me*(er=isNaN(er)?0:er);return{previous:{x:Re.x-sr*(ft.x-Qe.x),y:Re.y-sr*(ft.y-Qe.y)},next:{x:Re.x+Dr*(ft.x-Qe.x),y:Re.y+Dr*(ft.y-Qe.y)}}},Pe.EPSILON=Number.EPSILON||1e-14,Pe.splineCurveMonotone=function(k){var Me,Qe,Re,ft,It,Cn,er,sr,Dr,ne=(k||[]).map(function(oi){return{model:oi._model,deltaK:0,mK:0}}),he=ne.length;for(Me=0;Me0?ne[Me-1]:null,(ft=Me0?ne[Me-1]:null)&&!Qe.model.skip&&(Re.model.controlPointPreviousX=Re.model.x-(Dr=(Re.model.x-Qe.model.x)/3),Re.model.controlPointPreviousY=Re.model.y-Dr*Re.mK),ft&&!ft.model.skip&&(Re.model.controlPointNextX=Re.model.x+(Dr=(ft.model.x-Re.model.x)/3),Re.model.controlPointNextY=Re.model.y+Dr*Re.mK))},Pe.nextItem=function(k,ne,he){return he?ne>=k.length-1?k[0]:k[ne+1]:ne>=k.length-1?k[k.length-1]:k[ne+1]},Pe.previousItem=function(k,ne,he){return he?ne<=0?k[k.length-1]:k[ne-1]:ne<=0?k[0]:k[ne-1]},Pe.niceNum=function(k,ne){var he=Math.floor(Pe.log10(k)),Me=k/Math.pow(10,he);return(ne?Me<1.5?1:Me<3?2:Me<7?5:10:Me<=1?1:Me<=2?2:Me<=5?5:10)*Math.pow(10,he)},Pe.requestAnimFrame=typeof window>"u"?function(k){k()}:window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(k){return window.setTimeout(k,1e3/60)},Pe.getRelativePosition=function(k,ne){var he,Me,Qe=k.originalEvent||k,Re=k.target||k.srcElement,ft=Re.getBoundingClientRect(),wt=Qe.touches;wt&&wt.length>0?(he=wt[0].clientX,Me=wt[0].clientY):(he=Qe.clientX,Me=Qe.clientY);var It=parseFloat(Pe.getStyle(Re,"padding-left")),Cn=parseFloat(Pe.getStyle(Re,"padding-top")),er=parseFloat(Pe.getStyle(Re,"padding-right")),sr=parseFloat(Pe.getStyle(Re,"padding-bottom")),oi=ft.bottom-ft.top-Cn-sr;return{x:he=Math.round((he-ft.left-It)/(ft.right-ft.left-It-er)*Re.width/ne.currentDevicePixelRatio),y:Me=Math.round((Me-ft.top-Cn)/oi*Re.height/ne.currentDevicePixelRatio)}},Pe.getConstraintWidth=function(k){return N(k,"max-width","clientWidth")},Pe.getConstraintHeight=function(k){return N(k,"max-height","clientHeight")},Pe._calculatePadding=function(k,ne,he){return(ne=Pe.getStyle(k,ne)).indexOf("%")>-1?he*parseInt(ne,10)/100:parseInt(ne,10)},Pe._getParentNode=function(k){var ne=k.parentNode;return ne&&"[object ShadowRoot]"===ne.toString()&&(ne=ne.host),ne},Pe.getMaximumWidth=function(k){var ne=Pe._getParentNode(k);if(!ne)return k.clientWidth;var 
he=ne.clientWidth,Re=he-Pe._calculatePadding(ne,"padding-left",he)-Pe._calculatePadding(ne,"padding-right",he),ft=Pe.getConstraintWidth(k);return isNaN(ft)?Re:Math.min(Re,ft)},Pe.getMaximumHeight=function(k){var ne=Pe._getParentNode(k);if(!ne)return k.clientHeight;var he=ne.clientHeight,Re=he-Pe._calculatePadding(ne,"padding-top",he)-Pe._calculatePadding(ne,"padding-bottom",he),ft=Pe.getConstraintHeight(k);return isNaN(ft)?Re:Math.min(Re,ft)},Pe.getStyle=function(k,ne){return k.currentStyle?k.currentStyle[ne]:document.defaultView.getComputedStyle(k,null).getPropertyValue(ne)},Pe.retinaScale=function(k,ne){var he=k.currentDevicePixelRatio=ne||typeof window<"u"&&window.devicePixelRatio||1;if(1!==he){var Me=k.canvas,Qe=k.height,Re=k.width;Me.height=Qe*he,Me.width=Re*he,k.ctx.scale(he,he),!Me.style.height&&!Me.style.width&&(Me.style.height=Qe+"px",Me.style.width=Re+"px")}},Pe.fontString=function(k,ne,he){return ne+" "+k+"px "+he},Pe.longestText=function(k,ne,he,Me){var Qe=(Me=Me||{}).data=Me.data||{},Re=Me.garbageCollect=Me.garbageCollect||[];Me.font!==ne&&(Qe=Me.data={},Re=Me.garbageCollect=[],Me.font=ne),k.font=ne;var It,Cn,er,sr,Dr,ft=0,wt=he.length;for(It=0;It<wt;It++)if(null!=(sr=he[It])&&!0!==Pe.isArray(sr))ft=Pe.measureText(k,Qe,Re,ft,sr);else if(Pe.isArray(sr))for(Cn=0,er=sr.length;Cn<er;Cn++)null!=(Dr=sr[Cn])&&!Pe.isArray(Dr)&&(ft=Pe.measureText(k,Qe,Re,ft,Dr));var oi=Re.length/2;if(oi>he.length){for(It=0;It<oi;It++)delete Qe[Re[It]];Re.splice(0,oi)}return ft},Pe.measureText=function(k,ne,he,Me,Qe){var Re=ne[Qe];return Re||(Re=ne[Qe]=k.measureText(Qe).width,he.push(Qe)),Re>Me&&(Me=Re),Me},Pe.numberOfLabelLines=function(k){var ne=1;return Pe.each(k,function(he){Pe.isArray(he)&&he.length>ne&&(ne=he.length)}),ne},Pe.color=Tt?function(k){return k instanceof CanvasGradient&&(k=qr.global.defaultColor),Tt(k)}:function(k){return console.error("Color.js not found!"),k},Pe.getHoverColor=function(k){return k instanceof CanvasPattern||k instanceof CanvasGradient?k:Pe.color(k).saturate(.5).darken(.1).rgbString()}}(),kn._adapters=Go,kn.Animation=dn,kn.animationService=Ge,kn.controllers=Bi,kn.DatasetController=Ai,kn.defaults=qr,kn.Element=nr,kn.elements=ss,kn.Interaction=vl,kn.layouts=Xl,kn.platform=Je,kn.plugins=en,kn.Scale=je,kn.scaleService=fi,kn.Ticks=Io,kn.Tooltip=Lc,kn.helpers.each(Hd,function(h,b){kn.scaleService.registerScaleType(b,h,h._defaults)}),pc)pc.hasOwnProperty(od)&&kn.plugins.register(pc[od]);kn.platform.initialize();var Ed=kn;return typeof window<"u"&&(window.Chart=kn),kn.Chart=kn,kn.Legend=pc.legend._element,kn.Title=pc.title._element,kn.pluginService=kn.plugins,kn.PluginBase=kn.Element.extend({}),kn.canvasHelpers=kn.helpers.canvas,kn.layoutService=kn.layouts,kn.LinearScaleBase=Fs,kn.helpers.each(["Bar","Bubble","Doughnut","Line","PolarArea","Radar","Scatter"],function(h){kn[h]=function(b,N){return new kn(b,kn.helpers.merge(N||{},{type:h.charAt(0).toLowerCase()+h.slice(1)}))}}),Ed}(function(){try{return s(16738)}catch{}}())},82885:(E,C)=>{var r;!function(){"use strict";var a={}.hasOwnProperty;function u(){for(var e=[],f=0;f{E.exports=function(s,r){for(var a=[],c=0;c{"use strict";var r=s(35311),a={"text/plain":"Text","text/html":"Url",default:"Text"};E.exports=function e(f,m){var T,M,w,D,U,W,$=!1;m||(m={}),T=m.debug||!1;try{if(w=r(),D=document.createRange(),U=document.getSelection(),(W=document.createElement("span")).textContent=f,W.ariaHidden="true",W.style.all="unset",W.style.position="fixed",W.style.top=0,W.style.clip="rect(0, 0, 0, 0)",W.style.whiteSpace="pre",W.style.webkitUserSelect="text",W.style.MozUserSelect="text",W.style.msUserSelect="text",W.style.userSelect="text",W.addEventListener("copy",function(F){F.stopPropagation(),m.format&&(F.preventDefault(),typeof F.clipboardData>"u"?(T&&console.warn("unable to use e.clipboardData"),T&&console.warn("trying IE specific 
stuff"),window.clipboardData.clearData(),window.clipboardData.setData(a[m.format]||a.default,f)):(F.clipboardData.clearData(),F.clipboardData.setData(m.format,f))),m.onCopy&&(F.preventDefault(),m.onCopy(F.clipboardData))}),document.body.appendChild(W),D.selectNodeContents(W),U.addRange(D),!document.execCommand("copy"))throw new Error("copy command was unsuccessful");$=!0}catch(F){T&&console.error("unable to copy using execCommand: ",F),T&&console.warn("trying IE specific stuff");try{window.clipboardData.setData(m.format||"text",f),m.onCopy&&m.onCopy(window.clipboardData),$=!0}catch(X){T&&console.error("unable to copy using clipboardData: ",X),T&&console.error("falling back to prompt"),M=function u(f){var m=(/mac os x/i.test(navigator.userAgent)?"\u2318":"Ctrl")+"+C";return f.replace(/#{\s*key\s*}/g,m)}("message"in m?m.message:"Copy to clipboard: #{key}, Enter"),window.prompt(M,f)}}finally{U&&("function"==typeof U.removeRange?U.removeRange(D):U.removeAllRanges()),W&&document.body.removeChild(W),w()}return $}},43987:(E,C,s)=>{"use strict";var r=s(75242);E.exports=r},99556:(E,C,s)=>{"use strict";var r=s(10323);E.exports=r},39287:(E,C,s)=>{"use strict";var r=s(8748);E.exports=r},25272:(E,C,s)=>{"use strict";var r=s(71873);E.exports=r},54450:(E,C,s)=>{"use strict";var r=s(19095);E.exports=r},39557:(E,C,s)=>{"use strict";var r=s(52049);E.exports=r},61611:(E,C,s)=>{"use strict";var r=s(87054);E.exports=r},4412:(E,C,s)=>{"use strict";var r=s(30252);E.exports=r},22549:(E,C,s)=>{"use strict";var r=s(45284);E.exports=r},47646:(E,C,s)=>{"use strict";var r=s(70157);E.exports=r},78663:(E,C,s)=>{"use strict";var r=s(640);s(41554),E.exports=r},48498:(E,C,s)=>{"use strict";var r=s(50320);E.exports=r},4922:(E,C,s)=>{"use strict";var r=s(93006);E.exports=r},95190:(E,C,s)=>{"use strict";var r=s(36226);E.exports=r},78525:(E,C,s)=>{"use strict";var r=s(21968);E.exports=r},21064:(E,C,s)=>{"use strict";var r=s(87259);E.exports=r},65641:(E,C,s)=>{"use strict";var r=s(62021);E.exports=r},21693:(E,C,s)=>{"use strict";var r=s(57682);E.exports=r},88907:(E,C,s)=>{"use strict";var r=s(94222);E.exports=r},41432:(E,C,s)=>{"use strict";var r=s(1162);E.exports=r},7398:(E,C,s)=>{"use strict";var r=s(82805);E.exports=r},67221:(E,C,s)=>{"use strict";var r=s(26498);s(68333),E.exports=r},67447:(E,C,s)=>{"use strict";var r=s(44850);E.exports=r},58811:(E,C,s)=>{"use strict";var r=s(9634);E.exports=r},19573:(E,C,s)=>{"use strict";var r=s(96551);s(43548),s(55461),s(5737),s(71985),E.exports=r},10226:(E,C,s)=>{"use strict";var r=s(98908);E.exports=r},56378:(E,C,s)=>{"use strict";var r=s(55434);E.exports=r},74771:(E,C,s)=>{"use strict";s(3934),s(261);var r=s(13544);E.exports=r.Array.from},8412:(E,C,s)=>{"use strict";s(2862);var r=s(13544);E.exports=r.Array.isArray},77377:(E,C,s)=>{"use strict";s(1625);var r=s(97911);E.exports=r("Array").concat},399:(E,C,s)=>{"use strict";s(1285),s(17221);var r=s(97911);E.exports=r("Array").entries},66933:(E,C,s)=>{"use strict";s(70466);var r=s(97911);E.exports=r("Array").every},9504:(E,C,s)=>{"use strict";s(24990);var r=s(97911);E.exports=r("Array").fill},82168:(E,C,s)=>{"use strict";s(56534);var r=s(97911);E.exports=r("Array").filter},65618:(E,C,s)=>{"use strict";s(12773);var r=s(97911);E.exports=r("Array").findIndex},9186:(E,C,s)=>{"use strict";s(60326);var r=s(97911);E.exports=r("Array").find},98812:(E,C,s)=>{"use strict";s(98792);var r=s(97911);E.exports=r("Array").forEach},58479:(E,C,s)=>{"use strict";s(77059);var r=s(97911);E.exports=r("Array").includes},43207:(E,C,s)=>{"use strict";s(2795);var 
r=s(97911);E.exports=r("Array").indexOf},33195:(E,C,s)=>{"use strict";s(1285),s(17221);var r=s(97911);E.exports=r("Array").keys},63033:(E,C,s)=>{"use strict";s(74926);var r=s(97911);E.exports=r("Array").lastIndexOf},5736:(E,C,s)=>{"use strict";s(88119);var r=s(97911);E.exports=r("Array").map},7909:(E,C,s)=>{"use strict";s(93870);var r=s(97911);E.exports=r("Array").push},7198:(E,C,s)=>{"use strict";s(46250);var r=s(97911);E.exports=r("Array").reduce},84302:(E,C,s)=>{"use strict";s(32836);var r=s(97911);E.exports=r("Array").reverse},86693:(E,C,s)=>{"use strict";s(72999);var r=s(97911);E.exports=r("Array").slice},24273:(E,C,s)=>{"use strict";s(50733);var r=s(97911);E.exports=r("Array").some},45974:(E,C,s)=>{"use strict";s(93639);var r=s(97911);E.exports=r("Array").sort},68012:(E,C,s)=>{"use strict";s(63117);var r=s(97911);E.exports=r("Array").splice},46332:(E,C,s)=>{"use strict";s(1285),s(17221);var r=s(97911);E.exports=r("Array").values},42618:(E,C,s)=>{"use strict";s(34699);var r=s(13544);E.exports=r.Date.now},97724:(E,C,s)=>{"use strict";s(33379);var r=s(97911);E.exports=r("Function").bind},63791:(E,C,s)=>{"use strict";s(1285),s(3934);var r=s(34014);E.exports=r},69029:(E,C,s)=>{"use strict";var r=s(23336),a=s(97724),c=Function.prototype;E.exports=function(u){var e=u.bind;return u===c||r(c,u)&&e===c.bind?a:e}},28924:(E,C,s)=>{"use strict";var r=s(23336),a=s(77377),c=Array.prototype;E.exports=function(u){var e=u.concat;return u===c||r(c,u)&&e===c.concat?a:e}},98709:(E,C,s)=>{"use strict";var r=s(23336),a=s(66933),c=Array.prototype;E.exports=function(u){var e=u.every;return u===c||r(c,u)&&e===c.every?a:e}},65991:(E,C,s)=>{"use strict";var r=s(23336),a=s(9504),c=Array.prototype;E.exports=function(u){var e=u.fill;return u===c||r(c,u)&&e===c.fill?a:e}},64158:(E,C,s)=>{"use strict";var r=s(23336),a=s(82168),c=Array.prototype;E.exports=function(u){var e=u.filter;return u===c||r(c,u)&&e===c.filter?a:e}},91799:(E,C,s)=>{"use strict";var r=s(23336),a=s(65618),c=Array.prototype;E.exports=function(u){var e=u.findIndex;return u===c||r(c,u)&&e===c.findIndex?a:e}},26155:(E,C,s)=>{"use strict";var r=s(23336),a=s(9186),c=Array.prototype;E.exports=function(u){var e=u.find;return u===c||r(c,u)&&e===c.find?a:e}},33758:(E,C,s)=>{"use strict";var r=s(23336),a=s(58479),c=s(85136),u=Array.prototype,e=String.prototype;E.exports=function(f){var m=f.includes;return f===u||r(u,f)&&m===u.includes?a:"string"==typeof f||f===e||r(e,f)&&m===e.includes?c:m}},7592:(E,C,s)=>{"use strict";var r=s(23336),a=s(43207),c=Array.prototype;E.exports=function(u){var e=u.indexOf;return u===c||r(c,u)&&e===c.indexOf?a:e}},17480:(E,C,s)=>{"use strict";var r=s(23336),a=s(63033),c=Array.prototype;E.exports=function(u){var e=u.lastIndexOf;return u===c||r(c,u)&&e===c.lastIndexOf?a:e}},20681:(E,C,s)=>{"use strict";var r=s(23336),a=s(5736),c=Array.prototype;E.exports=function(u){var e=u.map;return u===c||r(c,u)&&e===c.map?a:e}},801:(E,C,s)=>{"use strict";var r=s(23336),a=s(7909),c=Array.prototype;E.exports=function(u){var e=u.push;return u===c||r(c,u)&&e===c.push?a:e}},90949:(E,C,s)=>{"use strict";var r=s(23336),a=s(7198),c=Array.prototype;E.exports=function(u){var e=u.reduce;return u===c||r(c,u)&&e===c.reduce?a:e}},99316:(E,C,s)=>{"use strict";var r=s(23336),a=s(96302),c=String.prototype;E.exports=function(u){var e=u.repeat;return"string"==typeof u||u===c||r(c,u)&&e===c.repeat?a:e}},62212:(E,C,s)=>{"use strict";var r=s(23336),a=s(84302),c=Array.prototype;E.exports=function(u){var e=u.reverse;return 
u===c||r(c,u)&&e===c.reverse?a:e}},49073:(E,C,s)=>{"use strict";var r=s(23336),a=s(86693),c=Array.prototype;E.exports=function(u){var e=u.slice;return u===c||r(c,u)&&e===c.slice?a:e}},24146:(E,C,s)=>{"use strict";var r=s(23336),a=s(24273),c=Array.prototype;E.exports=function(u){var e=u.some;return u===c||r(c,u)&&e===c.some?a:e}},40104:(E,C,s)=>{"use strict";var r=s(23336),a=s(45974),c=Array.prototype;E.exports=function(u){var e=u.sort;return u===c||r(c,u)&&e===c.sort?a:e}},3555:(E,C,s)=>{"use strict";var r=s(23336),a=s(68012),c=Array.prototype;E.exports=function(u){var e=u.splice;return u===c||r(c,u)&&e===c.splice?a:e}},42475:(E,C,s)=>{"use strict";var r=s(23336),a=s(98720),c=String.prototype;E.exports=function(u){var e=u.startsWith;return"string"==typeof u||u===c||r(c,u)&&e===c.startsWith?a:e}},65786:(E,C,s)=>{"use strict";var r=s(23336),a=s(75998),c=String.prototype;E.exports=function(u){var e=u.trim;return"string"==typeof u||u===c||r(c,u)&&e===c.trim?a:e}},66306:(E,C,s)=>{"use strict";s(75071);var r=s(13544),a=s(2543);r.JSON||(r.JSON={stringify:JSON.stringify}),E.exports=function(u,e,f){return a(r.JSON.stringify,null,arguments)}},31845:(E,C,s)=>{"use strict";s(1285),s(85140),s(17221),s(3934);var r=s(13544);E.exports=r.Map},44168:(E,C,s)=>{"use strict";s(67234);var r=s(13544);E.exports=r.Object.assign},25852:(E,C,s)=>{"use strict";s(86516);var a=s(13544).Object;E.exports=function(u,e){return a.create(u,e)}},24457:(E,C,s)=>{"use strict";s(36255);var a=s(13544).Object,c=E.exports=function(e,f){return a.defineProperties(e,f)};a.defineProperties.sham&&(c.sham=!0)},99671:(E,C,s)=>{"use strict";s(84468);var a=s(13544).Object,c=E.exports=function(e,f,m){return a.defineProperty(e,f,m)};a.defineProperty.sham&&(c.sham=!0)},38007:(E,C,s)=>{"use strict";s(86627);var a=s(13544).Object,c=E.exports=function(e,f){return a.getOwnPropertyDescriptor(e,f)};a.getOwnPropertyDescriptor.sham&&(c.sham=!0)},57432:(E,C,s)=>{"use strict";s(78275);var r=s(13544);E.exports=r.Object.getOwnPropertyDescriptors},36541:(E,C,s)=>{"use strict";s(56728);var r=s(13544);E.exports=r.Object.getOwnPropertySymbols},17303:(E,C,s)=>{"use strict";s(31193);var r=s(13544);E.exports=r.Object.getPrototypeOf},62149:(E,C,s)=>{"use strict";s(56557);var r=s(13544);E.exports=r.Object.keys},86537:(E,C,s)=>{"use strict";s(17971);var r=s(13544);E.exports=r.Object.setPrototypeOf},79553:(E,C,s)=>{"use strict";s(88923);var r=s(13544);E.exports=r.Object.values},80092:(E,C,s)=>{"use strict";s(10901),s(1285),s(17221),s(66793),s(84798),s(98857),s(30185),s(3934);var r=s(13544);E.exports=r.Promise},472:(E,C,s)=>{"use strict";s(19539);var r=s(13544);E.exports=r.Reflect.construct},4678:(E,C,s)=>{"use strict";s(60851);var r=s(13544);E.exports=r.Reflect.get},85136:(E,C,s)=>{"use strict";s(97764);var r=s(97911);E.exports=r("String").includes},96302:(E,C,s)=>{"use strict";s(3588);var r=s(97911);E.exports=r("String").repeat},98720:(E,C,s)=>{"use strict";s(24655);var r=s(97911);E.exports=r("String").startsWith},75998:(E,C,s)=>{"use strict";s(90451);var r=s(97911);E.exports=r("String").trim},61697:(E,C,s)=>{"use strict";s(1625),s(17221),s(56728),s(16426),s(1172),s(99579),s(41258),s(2383),s(44339),s(64776),s(88215),s(65389),s(12733),s(97977),s(59792),s(60242),s(26291),s(32300),s(63603),s(44864);var r=s(13544);E.exports=r.Symbol},42497:(E,C,s)=>{"use strict";s(1285),s(17221),s(3934),s(2383);var r=s(89734);E.exports=r.f("iterator")},50681:(E,C,s)=>{"use strict";s(68154),s(59792);var r=s(89734);E.exports=r.f("toPrimitive")},31236:(E,C,s)=>{"use 
strict";E.exports=s(58044)},63811:(E,C,s)=>{"use strict";E.exports=s(99692)},44948:(E,C,s)=>{"use strict";E.exports=s(61483)},96471:(E,C,s)=>{"use strict";E.exports=s(46815)},41171:(E,C,s)=>{"use strict";E.exports=s(47194)},62005:(E,C,s)=>{"use strict";E.exports=s(32944)},42346:(E,C,s)=>{"use strict";E.exports=s(26421)},24329:(E,C,s)=>{"use strict";E.exports=s(15123)},2793:(E,C,s)=>{"use strict";E.exports=s(49745)},88819:(E,C,s)=>{"use strict";E.exports=s(65861)},55912:(E,C,s)=>{"use strict";E.exports=s(63816)},73875:(E,C,s)=>{"use strict";var r=s(43987);E.exports=r},91700:(E,C,s)=>{"use strict";var r=s(99556);E.exports=r},70589:(E,C,s)=>{"use strict";var r=s(39287);E.exports=r},71432:(E,C,s)=>{"use strict";var r=s(25272);E.exports=r},73712:(E,C,s)=>{"use strict";var r=s(54450);E.exports=r},58044:(E,C,s)=>{"use strict";var r=s(39557);E.exports=r},55451:(E,C,s)=>{"use strict";var r=s(61611);E.exports=r},99692:(E,C,s)=>{"use strict";var r=s(4412);E.exports=r},61483:(E,C,s)=>{"use strict";var r=s(22549);E.exports=r},46815:(E,C,s)=>{"use strict";var r=s(47646);E.exports=r},28296:(E,C,s)=>{"use strict";var r=s(78663);s(78271),s(60854),s(10509),s(30887),s(54547),s(68996),s(1530),s(60176),s(41688),s(92847),s(17316),s(58786),s(51943),s(12783),s(69773),s(22337),s(40199),s(69046),s(84131),E.exports=r},96973:(E,C,s)=>{"use strict";var r=s(48498);E.exports=r},47194:(E,C,s)=>{"use strict";var r=s(4922);E.exports=r},56805:(E,C,s)=>{"use strict";var r=s(95190);E.exports=r},32944:(E,C,s)=>{"use strict";var r=s(78525);E.exports=r},70729:(E,C,s)=>{"use strict";var r=s(21064);E.exports=r},48299:(E,C,s)=>{"use strict";var r=s(65641);E.exports=r},33969:(E,C,s)=>{"use strict";var r=s(21693);E.exports=r},26421:(E,C,s)=>{"use strict";var r=s(88907);E.exports=r},37785:(E,C,s)=>{"use strict";var r=s(41432);E.exports=r},15123:(E,C,s)=>{"use strict";var r=s(7398);E.exports=r},49745:(E,C,s)=>{"use strict";var r=s(67221);s(67670),s(61127),s(93114),s(45975),E.exports=r},29044:(E,C,s)=>{"use strict";var r=s(67447);E.exports=r},20611:(E,C,s)=>{"use strict";var r=s(58811);E.exports=r},65861:(E,C,s)=>{"use strict";var r=s(19573);s(70337),s(44388),s(87097),s(90212),s(61652),s(90791),s(29559),s(93770),s(47743),E.exports=r},63816:(E,C,s)=>{"use strict";var r=s(10226);E.exports=r},72378:(E,C,s)=>{"use strict";var r=s(56378);E.exports=r},61812:(E,C,s)=>{"use strict";var r=s(52208),a=s(7378),c=TypeError;E.exports=function(u){if(r(u))return u;throw c(a(u)+" is not a function")}},54356:(E,C,s)=>{"use strict";var r=s(81177),a=s(7378),c=TypeError;E.exports=function(u){if(r(u))return u;throw c(a(u)+" is not a constructor")}},64902:(E,C,s)=>{"use strict";var r=s(7378);E.exports=function(a){if("object"==typeof a&&"size"in a&&"has"in a&&"get"in a&&"set"in a&&"delete"in a&&"entries"in a)return a;throw TypeError(r(a)+" is not a map")}},93221:(E,C,s)=>{"use strict";var r=s(52208),a=String,c=TypeError;E.exports=function(u){if("object"==typeof u||r(u))return u;throw c("Can't set "+a(u)+" as a prototype")}},82196:E=>{"use strict";E.exports=function(){}},54849:(E,C,s)=>{"use strict";var r=s(23336),a=TypeError;E.exports=function(c,u){if(r(u,c))return c;throw a("Incorrect invocation")}},64562:(E,C,s)=>{"use strict";var r=s(77293),a=String,c=TypeError;E.exports=function(u){if(r(u))return u;throw c(a(u)+" is not an object")}},76318:(E,C,s)=>{"use strict";var r=s(55756);E.exports=r(function(){if("function"==typeof ArrayBuffer){var a=new ArrayBuffer(8);Object.isExtensible(a)&&Object.defineProperty(a,"a",{value:8})}})},35277:(E,C,s)=>{"use strict";var 
r=s(70267),a=s(19401),c=s(6381);E.exports=function(e){for(var f=r(this),m=c(f),T=arguments.length,M=a(T>1?arguments[1]:void 0,m),w=T>2?arguments[2]:void 0,D=void 0===w?m:a(w,m);D>M;)f[M++]=e;return f}},8366:(E,C,s)=>{"use strict";var r=s(68607).forEach,c=s(33620)("forEach");E.exports=c?[].forEach:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}},51923:(E,C,s)=>{"use strict";var r=s(76781),a=s(25401),c=s(70267),u=s(93463),e=s(39918),f=s(81177),m=s(6381),T=s(46751),M=s(88055),w=s(34014),D=Array;E.exports=function(W){var $=c(W),J=f(this),F=arguments.length,X=F>1?arguments[1]:void 0,de=void 0!==X;de&&(X=r(X,F>2?arguments[2]:void 0));var se,fe,Te,$e,ge,Et,V=w($),ce=0;if(!V||this===D&&e(V))for(se=m($),fe=J?new this(se):D(se);se>ce;ce++)Et=de?X($[ce],ce):$[ce],T(fe,ce,Et);else for(ge=($e=M($,V)).next,fe=J?new this:[];!(Te=a(ge,$e)).done;ce++)Et=de?u($e,X,[Te.value,ce],!0):Te.value,T(fe,ce,Et);return fe.length=ce,fe}},95171:(E,C,s)=>{"use strict";var r=s(81010),a=s(19401),c=s(6381),u=function(e){return function(f,m,T){var U,M=r(f),w=c(M),D=a(T,w);if(e&&m!=m){for(;w>D;)if((U=M[D++])!=U)return!0}else for(;w>D;D++)if((e||D in M)&&M[D]===m)return e||D||0;return!e&&-1}};E.exports={includes:u(!0),indexOf:u(!1)}},68607:(E,C,s)=>{"use strict";var r=s(76781),a=s(23634),c=s(20973),u=s(70267),e=s(6381),f=s(2103),m=a([].push),T=function(M){var w=1===M,D=2===M,U=3===M,W=4===M,$=6===M,J=7===M,F=5===M||$;return function(X,de,V,ce){for(var ct,qe,se=u(X),fe=c(se),Te=r(de,V),$e=e(fe),ge=0,Et=ce||f,ot=w?Et(X,$e):D||J?Et(X,0):void 0;$e>ge;ge++)if((F||ge in fe)&&(qe=Te(ct=fe[ge],ge,se),M))if(w)ot[ge]=qe;else if(qe)switch(M){case 3:return!0;case 5:return ct;case 6:return ge;case 2:m(ot,ct)}else switch(M){case 4:return!1;case 7:m(ot,ct)}return $?-1:U||W?W:ot}};E.exports={forEach:T(0),map:T(1),filter:T(2),some:T(3),every:T(4),find:T(5),findIndex:T(6),filterReject:T(7)}},78375:(E,C,s)=>{"use strict";var r=s(2543),a=s(81010),c=s(33912),u=s(6381),e=s(33620),f=Math.min,m=[].lastIndexOf,T=!!m&&1/[1].lastIndexOf(1,-0)<0,M=e("lastIndexOf");E.exports=T||!M?function(U){if(T)return r(m,this,arguments)||0;var W=a(this),$=u(W),J=$-1;for(arguments.length>1&&(J=f(J,c(arguments[1]))),J<0&&(J=$+J);J>=0;J--)if(J in W&&W[J]===U)return J||0;return-1}:m},95913:(E,C,s)=>{"use strict";var r=s(55756),a=s(91840),c=s(63556),u=a("species");E.exports=function(e){return c>=51||!r(function(){var f=[];return(f.constructor={})[u]=function(){return{foo:1}},1!==f[e](Boolean).foo})}},33620:(E,C,s)=>{"use strict";var r=s(55756);E.exports=function(a,c){var u=[][a];return!!u&&r(function(){u.call(null,c||function(){return 1},1)})}},88908:(E,C,s)=>{"use strict";var r=s(61812),a=s(70267),c=s(20973),u=s(6381),e=TypeError,f=function(m){return function(T,M,w,D){r(M);var U=a(T),W=c(U),$=u(U),J=m?$-1:0,F=m?-1:1;if(w<2)for(;;){if(J in W){D=W[J],J+=F;break}if(J+=F,m?J<0:$<=J)throw e("Reduce of empty array with no initial value")}for(;m?J>=0:$>J;J+=F)J in W&&(D=M(D,W[J],J,U));return D}};E.exports={left:f(!1),right:f(!0)}},54716:(E,C,s)=>{"use strict";var r=s(49642),a=s(89735),c=TypeError,u=Object.getOwnPropertyDescriptor,e=r&&!function(){if(void 0!==this)return!0;try{Object.defineProperty([],"length",{writable:!1}).length=1}catch(f){return f instanceof TypeError}}();E.exports=e?function(f,m){if(a(f)&&!u(f,"length").writable)throw c("Cannot set read only .length");return f.length=m}:function(f,m){return f.length=m}},8681:(E,C,s)=>{"use strict";var r=s(19401),a=s(6381),c=s(46751),u=Array,e=Math.max;E.exports=function(f,m,T){for(var 
M=a(f),w=r(m,M),D=r(void 0===T?M:T,M),U=u(e(D-w,0)),W=0;w<D;w++,W++)c(U,W,f[w]);return U.length=W,U},37591:(E,C,s)=>{"use strict";var r=s(23634);E.exports=r([].slice)},84865:(E,C,s)=>{"use strict";var r=s(8681),a=Math.floor,c=function(f,m){var T=f.length,M=a(T/2);return T<8?u(f,m):e(f,c(r(f,0,M),m),c(r(f,M),m),m)},u=function(f,m){for(var w,D,T=f.length,M=1;M<T;){for(D=M,w=f[M];D&&m(f[D-1],w)>0;)f[D]=f[--D];D!==M++&&(f[D]=w)}return f},e=function(f,m,T,M){for(var w=m.length,D=T.length,U=0,W=0;U<w||W<D;)f[U+W]=U<w&&W<D?M(m[U],T[W])<=0?m[U++]:T[W++]:U<w?m[U++]:T[W++];return f};E.exports=c},48045:(E,C,s)=>{"use strict";var r=s(89735),a=s(81177),c=s(77293),e=s(91840)("species"),f=Array;E.exports=function(m){var T;return r(m)&&(a(T=m.constructor)&&(T===f||r(T.prototype))||c(T)&&null===(T=T[e]))&&(T=void 0),void 0===T?f:T}},2103:(E,C,s)=>{"use strict";var r=s(48045);E.exports=function(a,c){return new(r(a))(0===c?0:c)}},93463:(E,C,s)=>{"use strict";var r=s(64562),a=s(40798);E.exports=function(c,u,e,f){try{return f?u(r(e)[0],e[1]):u(e)}catch(m){a(c,"throw",m)}}},49458:E=>{"use strict";E.exports=function(C,s){return 1===s?function(r,a){return r[C](a)}:function(r,a,c){return r[C](a,c)}}},5253:(E,C,s)=>{"use strict";var a=s(91840)("iterator"),c=!1;try{var u=0,e={next:function(){return{done:!!u++}},return:function(){c=!0}};e[a]=function(){return this},Array.from(e,function(){throw 2})}catch{}E.exports=function(f,m){try{if(!m&&!c)return!1}catch{return!1}var T=!1;try{var M={};M[a]=function(){return{next:function(){return{done:T=!0}}}},f(M)}catch{}return T}},49806:(E,C,s)=>{"use strict";var r=s(23634),a=r({}.toString),c=r("".slice);E.exports=function(u){return c(a(u),8,-1)}},35329:(E,C,s)=>{"use strict";var r=s(5552),a=s(52208),c=s(49806),e=s(91840)("toStringTag"),f=Object,m="Arguments"===c(function(){return arguments}());E.exports=r?c:function(M){var w,D,U;return void 0===M?"Undefined":null===M?"Null":"string"==typeof(D=function(M,w){try{return M[w]}catch{}}(w=f(M),e))?D:m?c(w):"Object"===(U=c(w))&&a(w.callee)?"Arguments":U}},83483:(E,C,s)=>{"use strict";var r=s(76781),a=s(25401),c=s(61812),u=s(54356),e=s(43550),f=s(41605),m=[].push;E.exports=function(M){var U,W,$,J,w=arguments.length,D=w>1?arguments[1]:void 0;return u(this),(U=void 0!==D)&&c(D),e(M)?new this:(W=[],U?($=0,J=r(D,w>2?arguments[2]:void 0),f(M,function(F){a(m,W,J(F,$++))})):f(M,m,{that:W}),new this(W))}},13067:(E,C,s)=>{"use strict";var r=s(37591);E.exports=function(){return new this(r(arguments))}},26650:(E,C,s)=>{"use strict";var r=s(83272),a=s(1707),c=s(84604),u=s(76781),e=s(54849),f=s(43550),m=s(41605),T=s(79077),M=s(28738),w=s(58014),D=s(49642),U=s(57867).fastKey,W=s(91093),$=W.set,J=W.getterFor;E.exports={getConstructor:function(F,X,de,V){var ce=F(function(ge,Et){e(ge,se),$(ge,{type:X,index:r(null),first:void 0,last:void 0,size:0}),D||(ge.size=0),f(Et)||m(Et,ge[V],{that:ge,AS_ENTRIES:de})}),se=ce.prototype,fe=J(X),Te=function(ge,Et,ot){var He,We,ct=fe(ge),qe=$e(ge,Et);return qe?qe.value=ot:(ct.last=qe={index:We=U(Et,!0),key:Et,value:ot,previous:He=ct.last,next:void 0,removed:!1},ct.first||(ct.first=qe),He&&(He.next=qe),D?ct.size++:ge.size++,"F"!==We&&(ct.index[We]=qe)),ge},$e=function(ge,Et){var qe,ot=fe(ge),ct=U(Et);if("F"!==ct)return ot.index[ct];for(qe=ot.first;qe;qe=qe.next)if(qe.key===Et)return qe};return c(se,{clear:function(){for(var ot=fe(this),ct=ot.index,qe=ot.first;qe;)qe.removed=!0,qe.previous&&(qe.previous=qe.previous.next=void 0),delete ct[qe.index],qe=qe.next;ot.first=ot.last=void 0,D?ot.size=0:this.size=0},delete:function(ge){var ot=fe(this),ct=$e(this,ge);if(ct){var qe=ct.next,He=ct.previous;delete
ot.index[ct.index],ct.removed=!0,He&&(He.next=qe),qe&&(qe.previous=He),ot.first===ct&&(ot.first=qe),ot.last===ct&&(ot.last=He),D?ot.size--:this.size--}return!!ct},forEach:function(Et){for(var qe,ot=fe(this),ct=u(Et,arguments.length>1?arguments[1]:void 0);qe=qe?qe.next:ot.first;)for(ct(qe.value,qe.key,this);qe&&qe.removed;)qe=qe.previous},has:function(Et){return!!$e(this,Et)}}),c(se,de?{get:function(Et){var ot=$e(this,Et);return ot&&ot.value},set:function(Et,ot){return Te(this,0===Et?0:Et,ot)}}:{add:function(Et){return Te(this,Et=0===Et?0:Et,Et)}}),D&&a(se,"size",{configurable:!0,get:function(){return fe(this).size}}),ce},setStrong:function(F,X,de){var V=X+" Iterator",ce=J(X),se=J(V);T(F,X,function(fe,Te){$(this,{type:V,target:fe,state:ce(fe),kind:Te,last:void 0})},function(){for(var fe=se(this),Te=fe.kind,$e=fe.last;$e&&$e.removed;)$e=$e.previous;return fe.target&&(fe.last=$e=$e?$e.next:fe.state.first)?M("keys"===Te?$e.key:"values"===Te?$e.value:[$e.key,$e.value],!1):(fe.target=void 0,M(void 0,!0))},de?"entries":"values",!de,!0),w(X)}}},85116:(E,C,s)=>{"use strict";var r=s(90513),a=s(70009),c=s(57867),u=s(55756),e=s(65162),f=s(41605),m=s(54849),T=s(52208),M=s(77293),w=s(43550),D=s(85681),U=s(48011).f,W=s(68607).forEach,$=s(49642),J=s(91093),F=J.set,X=J.getterFor;E.exports=function(de,V,ce){var ot,se=-1!==de.indexOf("Map"),fe=-1!==de.indexOf("Weak"),Te=se?"set":"add",$e=a[de],ge=$e&&$e.prototype,Et={};if($&&T($e)&&(fe||ge.forEach&&!u(function(){(new $e).entries().next()}))){var ct=(ot=V(function(He,We){F(m(He,ct),{type:de,collection:new $e}),w(We)||f(We,He[Te],{that:He,AS_ENTRIES:se})})).prototype,qe=X(de);W(["add","clear","delete","forEach","get","has","set","keys","values","entries"],function(He){var We="add"===He||"set"===He;He in ge&&(!fe||"clear"!==He)&&e(ct,He,function(Le,Pt){var it=qe(this).collection;if(!We&&fe&&!M(Le))return"get"===He&&void 0;var Xt=it[He](0===Le?0:Le,Pt);return We?this:Xt})}),fe||U(ct,"size",{configurable:!0,get:function(){return qe(this).collection.size}})}else ot=ce.getConstructor(V,de,se,Te),c.enable();return D(ot,de,!1,!0),Et[de]=ot,r({global:!0,forced:!0},Et),fe||ce.setStrong(ot,de,se),ot}},65031:(E,C,s)=>{"use strict";var r=s(80112),a=s(59823),c=s(25525),u=s(48011);E.exports=function(e,f,m){for(var T=a(f),M=u.f,w=c.f,D=0;D{"use strict";var a=s(91840)("match");E.exports=function(c){var u=/./;try{"/./"[c](u)}catch{try{return u[a]=!1,"/./"[c](u)}catch{}}return!1}},37112:(E,C,s)=>{"use strict";var r=s(55756);E.exports=!r(function(){function a(){}return a.prototype.constructor=null,Object.getPrototypeOf(new a)!==a.prototype})},28738:E=>{"use strict";E.exports=function(C,s){return{value:C,done:s}}},65162:(E,C,s)=>{"use strict";var r=s(49642),a=s(48011),c=s(51361);E.exports=r?function(u,e,f){return a.f(u,e,c(1,f))}:function(u,e,f){return u[e]=f,u}},51361:E=>{"use strict";E.exports=function(C,s){return{enumerable:!(1&C),configurable:!(2&C),writable:!(4&C),value:s}}},46751:(E,C,s)=>{"use strict";var r=s(62939),a=s(48011),c=s(51361);E.exports=function(u,e,f){var m=r(e);m in u?a.f(u,m,c(0,f)):u[m]=f}},1707:(E,C,s)=>{"use strict";var r=s(48011);E.exports=function(a,c,u){return r.f(a,c,u)}},42915:(E,C,s)=>{"use strict";var r=s(65162);E.exports=function(a,c,u,e){return e&&e.enumerable?a[c]=u:r(a,c,u),a}},84604:(E,C,s)=>{"use strict";var r=s(42915);E.exports=function(a,c,u){for(var e in c)u&&u.unsafe&&a[e]?a[e]=c[e]:r(a,e,c[e],u);return a}},34056:(E,C,s)=>{"use strict";var 
r=s(70009),a=Object.defineProperty;E.exports=function(c,u){try{a(r,c,{value:u,configurable:!0,writable:!0})}catch{r[c]=u}return u}},67236:(E,C,s)=>{"use strict";var r=s(7378),a=TypeError;E.exports=function(c,u){if(!delete c[u])throw a("Cannot delete property "+r(u)+" of "+r(c))}},49642:(E,C,s)=>{"use strict";var r=s(55756);E.exports=!r(function(){return 7!==Object.defineProperty({},1,{get:function(){return 7}})[1]})},59478:E=>{"use strict";var C="object"==typeof document&&document.all;E.exports={all:C,IS_HTMLDDA:typeof C>"u"&&void 0!==C}},96682:(E,C,s)=>{"use strict";var r=s(70009),a=s(77293),c=r.document,u=a(c)&&a(c.createElement);E.exports=function(e){return u?c.createElement(e):{}}},11594:E=>{"use strict";var C=TypeError;E.exports=function(r){if(r>9007199254740991)throw C("Maximum allowed index exceeded");return r}},44125:E=>{"use strict";E.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},36410:(E,C,s)=>{"use strict";var a=s(86053).match(/firefox\/(\d+)/i);E.exports=!!a&&+a[1]},34008:(E,C,s)=>{"use strict";var r=s(31813),a=s(3787);E.exports=!r&&!a&&"object"==typeof window&&"object"==typeof document},70902:E=>{"use strict";E.exports="function"==typeof Bun&&Bun&&"string"==typeof Bun.version},31813:E=>{"use strict";E.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},5329:(E,C,s)=>{"use strict";var r=s(86053);E.exports=/MSIE|Trident/.test(r)},16137:(E,C,s)=>{"use strict";var r=s(86053);E.exports=/ipad|iphone|ipod/i.test(r)&&typeof Pebble<"u"},3877:(E,C,s)=>{"use strict";var r=s(86053);E.exports=/(?:ipad|iphone|ipod).*applewebkit/i.test(r)},3787:(E,C,s)=>{"use strict";var r=s(70009),a=s(49806);E.exports="process"===a(r.process)},85308:(E,C,s)=>{"use strict";var r=s(86053);E.exports=/web0s(?!.*chrome)/i.test(r)},86053:E=>{"use strict";E.exports=typeof navigator<"u"&&String(navigator.userAgent)||""},63556:(E,C,s)=>{"use strict";var m,T,r=s(70009),a=s(86053),c=r.process,u=r.Deno,e=c&&c.versions||u&&u.version,f=e&&e.v8;f&&(T=(m=f.split("."))[0]>0&&m[0]<4?1:+(m[0]+m[1])),!T&&a&&(!(m=a.match(/Edge\/(\d+)/))||m[1]>=74)&&(m=a.match(/Chrome\/(\d+)/))&&(T=+m[1]),E.exports=T},34545:(E,C,s)=>{"use strict";var a=s(86053).match(/AppleWebKit\/(\d+)\./);E.exports=!!a&&+a[1]},97911:(E,C,s)=>{"use strict";var r=s(13544);E.exports=function(a){return r[a+"Prototype"]}},44939:E=>{"use strict";E.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},40039:(E,C,s)=>{"use strict";var r=s(23634),a=Error,c=r("".replace),u=String(a("zxcasd").stack),e=/\n\s*at [^:]*:[^\n]*/,f=e.test(u);E.exports=function(m,T){if(f&&"string"==typeof m&&!a.prepareStackTrace)for(;T--;)m=c(m,e,"");return m}},77732:(E,C,s)=>{"use strict";var r=s(65162),a=s(40039),c=s(50499),u=Error.captureStackTrace;E.exports=function(e,f,m,T){c&&(u?u(e,f):r(e,"stack",a(m,T)))}},50499:(E,C,s)=>{"use strict";var r=s(55756),a=s(51361);E.exports=!r(function(){var c=Error("a");return!("stack"in c)||(Object.defineProperty(c,"stack",a(1,7)),7!==c.stack)})},90513:(E,C,s)=>{"use strict";var 
r=s(70009),a=s(2543),c=s(64350),u=s(52208),e=s(25525).f,f=s(79482),m=s(13544),T=s(76781),M=s(65162),w=s(80112),D=function(U){var W=function($,J,F){if(this instanceof W){switch(arguments.length){case 0:return new U;case 1:return new U($);case 2:return new U($,J)}return new U($,J,F)}return a(U,this,arguments)};return W.prototype=U.prototype,W};E.exports=function(U,W){var se,fe,Te,$e,ge,Et,ot,ct,qe,$=U.target,J=U.global,F=U.stat,X=U.proto,de=J?r:F?r[$]:(r[$]||{}).prototype,V=J?m:m[$]||M(m,$,{})[$],ce=V.prototype;for($e in W)fe=!(se=f(J?$e:$+(F?".":"#")+$e,U.forced))&&de&&w(de,$e),Et=V[$e],fe&&(ot=U.dontCallGetSet?(qe=e(de,$e))&&qe.value:de[$e]),ge=fe&&ot?ot:W[$e],(!fe||typeof Et!=typeof ge)&&(ct=U.bind&&fe?T(ge,r):U.wrap&&fe?D(ge):X&&u(ge)?c(ge):ge,(U.sham||ge&&ge.sham||Et&&Et.sham)&&M(ct,"sham",!0),M(V,$e,ct),X&&(w(m,Te=$+"Prototype")||M(m,Te,{}),M(m[Te],$e,ge),U.real&&ce&&(se||!ce[$e])&&M(ce,$e,ge)))}},55756:E=>{"use strict";E.exports=function(C){try{return!!C()}catch{return!0}}},3124:(E,C,s)=>{"use strict";var r=s(55756);E.exports=!r(function(){return Object.isExtensible(Object.preventExtensions({}))})},2543:(E,C,s)=>{"use strict";var r=s(29046),a=Function.prototype,c=a.apply,u=a.call;E.exports="object"==typeof Reflect&&Reflect.apply||(r?u.bind(c):function(){return u.apply(c,arguments)})},76781:(E,C,s)=>{"use strict";var r=s(64350),a=s(61812),c=s(29046),u=r(r.bind);E.exports=function(e,f){return a(e),void 0===f?e:c?u(e,f):function(){return e.apply(f,arguments)}}},29046:(E,C,s)=>{"use strict";var r=s(55756);E.exports=!r(function(){var a=function(){}.bind();return"function"!=typeof a||a.hasOwnProperty("prototype")})},44197:(E,C,s)=>{"use strict";var r=s(23634),a=s(61812),c=s(77293),u=s(80112),e=s(37591),f=s(29046),m=Function,T=r([].concat),M=r([].join),w={},D=function(U,W,$){if(!u(w,W)){for(var J=[],F=0;F{"use strict";var r=s(29046),a=Function.prototype.call;E.exports=r?a.bind(a):function(){return a.apply(a,arguments)}},29862:(E,C,s)=>{"use strict";var r=s(49642),a=s(80112),c=Function.prototype,u=r&&Object.getOwnPropertyDescriptor,e=a(c,"name"),f=e&&"something"===function(){}.name,m=e&&(!r||r&&u(c,"name").configurable);E.exports={EXISTS:e,PROPER:f,CONFIGURABLE:m}},13325:(E,C,s)=>{"use strict";var r=s(23634),a=s(61812);E.exports=function(c,u,e){try{return r(a(Object.getOwnPropertyDescriptor(c,u)[e]))}catch{}}},64350:(E,C,s)=>{"use strict";var r=s(49806),a=s(23634);E.exports=function(c){if("Function"===r(c))return a(c)}},23634:(E,C,s)=>{"use strict";var r=s(29046),a=Function.prototype,c=a.call,u=r&&a.bind.bind(c,c);E.exports=r?u:function(e){return function(){return c.apply(e,arguments)}}},7365:(E,C,s)=>{"use strict";var r=s(13544),a=s(70009),c=s(52208),u=function(e){return c(e)?e:void 0};E.exports=function(e,f){return arguments.length<2?u(r[e])||u(a[e]):r[e]&&r[e][f]||a[e]&&a[e][f]}},34014:(E,C,s)=>{"use strict";var r=s(35329),a=s(34778),c=s(43550),u=s(84394),f=s(91840)("iterator");E.exports=function(m){if(!c(m))return a(m,f)||a(m,"@@iterator")||u[r(m)]}},88055:(E,C,s)=>{"use strict";var r=s(25401),a=s(61812),c=s(64562),u=s(7378),e=s(34014),f=TypeError;E.exports=function(m,T){var M=arguments.length<2?e(m):T;if(a(M))return c(r(M,m));throw f(u(m)+" is not iterable")}},32092:(E,C,s)=>{"use strict";var r=s(23634),a=s(89735),c=s(52208),u=s(49806),e=s(41433),f=r([].push);E.exports=function(m){if(c(m))return m;if(a(m)){for(var T=m.length,M=[],w=0;w{"use strict";var r=s(61812),a=s(43550);E.exports=function(c,u){var e=c[u];return a(e)?void 0:r(e)}},70009:function(E){"use strict";var 
C=function(s){return s&&s.Math===Math&&s};E.exports=C("object"==typeof globalThis&&globalThis)||C("object"==typeof window&&window)||C("object"==typeof self&&self)||C("object"==typeof global&&global)||function(){return this}()||this||Function("return this")()},80112:(E,C,s)=>{"use strict";var r=s(23634),a=s(70267),c=r({}.hasOwnProperty);E.exports=Object.hasOwn||function(e,f){return c(a(e),f)}},45599:E=>{"use strict";E.exports={}},52912:E=>{"use strict";E.exports=function(C,s){try{1===arguments.length?console.error(C):console.error(C,s)}catch{}}},55690:(E,C,s)=>{"use strict";var r=s(7365);E.exports=r("document","documentElement")},50495:(E,C,s)=>{"use strict";var r=s(49642),a=s(55756),c=s(96682);E.exports=!r&&!a(function(){return 7!==Object.defineProperty(c("div"),"a",{get:function(){return 7}}).a})},20973:(E,C,s)=>{"use strict";var r=s(23634),a=s(55756),c=s(49806),u=Object,e=r("".split);E.exports=a(function(){return!u("z").propertyIsEnumerable(0)})?function(f){return"String"===c(f)?e(f,""):u(f)}:u},26699:(E,C,s)=>{"use strict";var r=s(23634),a=s(52208),c=s(24766),u=r(Function.toString);a(c.inspectSource)||(c.inspectSource=function(e){return u(e)}),E.exports=c.inspectSource},33411:(E,C,s)=>{"use strict";var r=s(77293),a=s(65162);E.exports=function(c,u){r(u)&&"cause"in u&&a(c,"cause",u.cause)}},57867:(E,C,s)=>{"use strict";var r=s(90513),a=s(23634),c=s(45599),u=s(77293),e=s(80112),f=s(48011).f,m=s(51518),T=s(62469),M=s(46401),w=s(13708),D=s(3124),U=!1,W=w("meta"),$=0,J=function(se){f(se,W,{value:{objectID:"O"+$++,weakData:{}}})},ce=E.exports={enable:function(){ce.enable=function(){},U=!0;var se=m.f,fe=a([].splice),Te={};Te[W]=1,se(Te).length&&(m.f=function($e){for(var ge=se($e),Et=0,ot=ge.length;Et{"use strict";var U,W,$,r=s(81101),a=s(70009),c=s(77293),u=s(65162),e=s(80112),f=s(24766),m=s(86066),T=s(45599),M="Object already initialized",w=a.TypeError;if(r||f.state){var X=f.state||(f.state=new(0,a.WeakMap));X.get=X.get,X.has=X.has,X.set=X.set,U=function(V,ce){if(X.has(V))throw w(M);return ce.facade=V,X.set(V,ce),ce},W=function(V){return X.get(V)||{}},$=function(V){return X.has(V)}}else{var de=m("state");T[de]=!0,U=function(V,ce){if(e(V,de))throw w(M);return ce.facade=V,u(V,de,ce),ce},W=function(V){return e(V,de)?V[de]:{}},$=function(V){return e(V,de)}}E.exports={set:U,get:W,has:$,enforce:function(V){return $(V)?W(V):U(V,{})},getterFor:function(V){return function(ce){var se;if(!c(ce)||(se=W(ce)).type!==V)throw w("Incompatible receiver, "+V+" required");return se}}}},39918:(E,C,s)=>{"use strict";var r=s(91840),a=s(84394),c=r("iterator"),u=Array.prototype;E.exports=function(e){return void 0!==e&&(a.Array===e||u[c]===e)}},89735:(E,C,s)=>{"use strict";var r=s(49806);E.exports=Array.isArray||function(c){return"Array"===r(c)}},52208:(E,C,s)=>{"use strict";var r=s(59478),a=r.all;E.exports=r.IS_HTMLDDA?function(c){return"function"==typeof c||c===a}:function(c){return"function"==typeof c}},81177:(E,C,s)=>{"use strict";var r=s(23634),a=s(55756),c=s(52208),u=s(35329),e=s(7365),f=s(26699),m=function(){},T=[],M=e("Reflect","construct"),w=/^\s*(?:class|function)\b/,D=r(w.exec),U=!w.exec(m),W=function(F){if(!c(F))return!1;try{return M(m,T,F),!0}catch{return!1}},$=function(F){if(!c(F))return!1;switch(u(F)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return U||!!D(w,f(F))}catch{return!0}};$.sham=!0,E.exports=!M||a(function(){var J;return W(W.call)||!W(Object)||!W(function(){J=!0})||J})?$:W},27029:(E,C,s)=>{"use strict";var r=s(80112);E.exports=function(a){return void 
0!==a&&(r(a,"value")||r(a,"writable"))}},79482:(E,C,s)=>{"use strict";var r=s(55756),a=s(52208),c=/#|\.prototype\./,u=function(M,w){var D=f[e(M)];return D===T||D!==m&&(a(w)?r(w):!!w)},e=u.normalize=function(M){return String(M).replace(c,".").toLowerCase()},f=u.data={},m=u.NATIVE="N",T=u.POLYFILL="P";E.exports=u},43550:E=>{"use strict";E.exports=function(C){return null==C}},77293:(E,C,s)=>{"use strict";var r=s(52208),a=s(59478),c=a.all;E.exports=a.IS_HTMLDDA?function(u){return"object"==typeof u?null!==u:r(u)||u===c}:function(u){return"object"==typeof u?null!==u:r(u)}},81124:E=>{"use strict";E.exports=!0},60373:(E,C,s)=>{"use strict";var r=s(77293),a=s(49806),u=s(91840)("match");E.exports=function(e){var f;return r(e)&&(void 0!==(f=e[u])?!!f:"RegExp"===a(e))}},74717:(E,C,s)=>{"use strict";var r=s(7365),a=s(52208),c=s(23336),u=s(99554),e=Object;E.exports=u?function(f){return"symbol"==typeof f}:function(f){var m=r("Symbol");return a(m)&&c(m.prototype,e(f))}},87463:(E,C,s)=>{"use strict";var r=s(25401);E.exports=function(a,c,u){for(var m,T,e=u?a:a.iterator,f=a.next;!(m=r(f,e)).done;)if(void 0!==(T=c(m.value)))return T}},41605:(E,C,s)=>{"use strict";var r=s(76781),a=s(25401),c=s(64562),u=s(7378),e=s(39918),f=s(6381),m=s(23336),T=s(88055),M=s(34014),w=s(40798),D=TypeError,U=function($,J){this.stopped=$,this.result=J},W=U.prototype;E.exports=function($,J,F){var Te,$e,ge,Et,ot,ct,qe,de=!(!F||!F.AS_ENTRIES),V=!(!F||!F.IS_RECORD),ce=!(!F||!F.IS_ITERATOR),se=!(!F||!F.INTERRUPTED),fe=r(J,F&&F.that),He=function(Le){return Te&&w(Te,"normal",Le),new U(!0,Le)},We=function(Le){return de?(c(Le),se?fe(Le[0],Le[1],He):fe(Le[0],Le[1])):se?fe(Le,He):fe(Le)};if(V)Te=$.iterator;else if(ce)Te=$;else{if(!($e=M($)))throw D(u($)+" is not iterable");if(e($e)){for(ge=0,Et=f($);Et>ge;ge++)if((ot=We($[ge]))&&m(W,ot))return ot;return new U(!1)}Te=T($,$e)}for(ct=V?$.next:Te.next;!(qe=a(ct,Te)).done;){try{ot=We(qe.value)}catch(Le){w(Te,"throw",Le)}if("object"==typeof ot&&ot&&m(W,ot))return ot}return new U(!1)}},40798:(E,C,s)=>{"use strict";var r=s(25401),a=s(64562),c=s(34778);E.exports=function(u,e,f){var m,T;a(u);try{if(!(m=c(u,"return"))){if("throw"===e)throw f;return f}m=r(m,u)}catch(M){T=!0,m=M}if("throw"===e)throw f;if(T)throw m;return a(m),f}},14554:(E,C,s)=>{"use strict";var r=s(38432).IteratorPrototype,a=s(83272),c=s(51361),u=s(85681),e=s(84394),f=function(){return this};E.exports=function(m,T,M,w){var D=T+" Iterator";return m.prototype=a(r,{next:c(+!w,M)}),u(m,D,!1,!0),e[D]=f,m}},79077:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(81124),u=s(29862),e=s(52208),f=s(14554),m=s(31426),T=s(54945),M=s(85681),w=s(65162),D=s(42915),U=s(91840),W=s(84394),$=s(38432),J=u.PROPER,F=u.CONFIGURABLE,X=$.IteratorPrototype,de=$.BUGGY_SAFARI_ITERATORS,V=U("iterator"),se="values",fe="entries",Te=function(){return this};E.exports=function($e,ge,Et,ot,ct,qe,He){f(Et,ge,ot);var Rn,At,qt,We=function(sn){if(sn===ct&&cn)return cn;if(!de&&sn&&sn in it)return it[sn];switch(sn){case"keys":case se:case fe:return function(){return new Et(this,sn)}}return function(){return new Et(this)}},Le=ge+" Iterator",Pt=!1,it=$e.prototype,Xt=it[V]||it["@@iterator"]||ct&&it[ct],cn=!de&&Xt||We(ct),pn="Array"===ge&&it.entries||Xt;if(pn&&(Rn=m(pn.call(new $e)))!==Object.prototype&&Rn.next&&(!c&&m(Rn)!==X&&(T?T(Rn,X):e(Rn[V])||D(Rn,V,Te)),M(Rn,Le,!0,!0),c&&(W[Le]=Te)),J&&ct===se&&Xt&&Xt.name!==se&&(!c&&F?w(it,"name",se):(Pt=!0,cn=function(){return a(Xt,this)})),ct)if(At={values:We(se),keys:qe?cn:We("keys"),entries:We(fe)},He)for(qt in At)(de||Pt||!(qt in 
it))&&D(it,qt,At[qt]);else r({target:ge,proto:!0,forced:de||Pt},At);return(!c||He)&&it[V]!==cn&&D(it,V,cn,{name:ct}),W[ge]=cn,At}},38432:(E,C,s)=>{"use strict";var D,U,W,r=s(55756),a=s(52208),c=s(77293),u=s(83272),e=s(31426),f=s(42915),m=s(91840),T=s(81124),M=m("iterator"),w=!1;[].keys&&("next"in(W=[].keys())?(U=e(e(W)))!==Object.prototype&&(D=U):w=!0),!c(D)||r(function(){var J={};return D[M].call(J)!==J})?D={}:T&&(D=u(D)),a(D[M])||f(D,M,function(){return this}),E.exports={IteratorPrototype:D,BUGGY_SAFARI_ITERATORS:w}},84394:E=>{"use strict";E.exports={}},6381:(E,C,s)=>{"use strict";var r=s(48869);E.exports=function(a){return r(a.length)}},60077:(E,C,s)=>{"use strict";var r=s(7365),a=s(49458),c=r("Map");E.exports={Map:c,set:a("set",2),get:a("get",1),has:a("has",1),remove:a("delete",1),proto:c.prototype}},21515:(E,C,s)=>{"use strict";var r=s(87463);E.exports=function(a,c,u){return u?r(a.entries(),function(e){return c(e[1],e[0])},!0):a.forEach(c)}},57729:(E,C,s)=>{"use strict";var r=s(25401),a=s(61812),c=s(52208),u=s(64562),e=TypeError;E.exports=function(m,T){var $,M=u(this),w=a(M.get),D=a(M.has),U=a(M.set),W=arguments.length>2?arguments[2]:void 0;if(!c(T)&&!c(W))throw e("At least one callback required");return r(D,M,m)?($=r(w,M,m),c(T)&&($=T($),r(U,M,m,$))):c(W)&&($=W(),r(U,M,m,$)),$}},8651:E=>{"use strict";var C=Math.ceil,s=Math.floor;E.exports=Math.trunc||function(a){var c=+a;return(c>0?s:C)(c)}},53460:(E,C,s)=>{"use strict";var F,X,de,V,ce,r=s(70009),a=s(76781),c=s(25525).f,u=s(37352).set,e=s(70918),f=s(3877),m=s(16137),T=s(85308),M=s(3787),w=r.MutationObserver||r.WebKitMutationObserver,D=r.document,U=r.process,W=r.Promise,$=c(r,"queueMicrotask"),J=$&&$.value;if(!J){var se=new e,fe=function(){var Te,$e;for(M&&(Te=U.domain)&&Te.exit();$e=se.get();)try{$e()}catch(ge){throw se.head&&F(),ge}Te&&Te.enter()};f||M||T||!w||!D?!m&&W&&W.resolve?((V=W.resolve(void 0)).constructor=W,ce=a(V.then,V),F=function(){ce(fe)}):M?F=function(){U.nextTick(fe)}:(u=a(u,r),F=function(){u(fe)}):(X=!0,de=D.createTextNode(""),new w(fe).observe(de,{characterData:!0}),F=function(){de.data=X=!X}),J=function(Te){se.head||F(),se.add(Te)}}E.exports=J},54256:(E,C,s)=>{"use strict";var r=s(61812),a=TypeError,c=function(u){var e,f;this.promise=new u(function(m,T){if(void 0!==e||void 0!==f)throw a("Bad Promise constructor");e=m,f=T}),this.resolve=r(e),this.reject=r(f)};E.exports.f=function(u){return new c(u)}},63313:(E,C,s)=>{"use strict";var r=s(41433);E.exports=function(a,c){return void 0===a?arguments.length<2?"":c:r(a)}},56421:(E,C,s)=>{"use strict";var r=s(60373),a=TypeError;E.exports=function(c){if(r(c))throw a("The method doesn't accept regular expressions");return c}},75791:(E,C,s)=>{"use strict";var r=s(49642),a=s(23634),c=s(25401),u=s(55756),e=s(28474),f=s(47238),m=s(25558),T=s(70267),M=s(20973),w=Object.assign,D=Object.defineProperty,U=a([].concat);E.exports=!w||u(function(){if(r&&1!==w({b:1},w(D({},"a",{enumerable:!0,get:function(){D(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var W={},$={},J=Symbol("assign detection"),F="abcdefghijklmnopqrst";return W[J]=7,F.split("").forEach(function(X){$[X]=X}),7!==w({},W)[J]||e(w({},$)).join("")!==F})?function($,J){for(var F=T($),X=arguments.length,de=1,V=f.f,ce=m.f;X>de;)for(var ge,se=M(arguments[de++]),fe=V?U(e(se),V(se)):e(se),Te=fe.length,$e=0;Te>$e;)ge=fe[$e++],(!r||c(ce,se,ge))&&(F[ge]=se[ge]);return F}:w},83272:(E,C,s)=>{"use strict";var 
X,r=s(64562),a=s(25913),c=s(44939),u=s(45599),e=s(55690),f=s(96682),m=s(86066),w="prototype",D="script",U=m("IE_PROTO"),W=function(){},$=function(V){return"<"+D+">"+V+"</"+D+">"},J=function(V){V.write($("")),V.close();var ce=V.parentWindow.Object;return V=null,ce},de=function(){try{X=new ActiveXObject("htmlfile")}catch{}de=typeof document<"u"?document.domain&&X?J(X):function(){var se,V=f("iframe"),ce="java"+D+":";return V.style.display="none",e.appendChild(V),V.src=String(ce),(se=V.contentWindow.document).open(),se.write($("document.F=Object")),se.close(),se.F}():J(X);for(var V=c.length;V--;)delete de[w][c[V]];return de()};u[U]=!0,E.exports=Object.create||function(ce,se){var fe;return null!==ce?(W[w]=r(ce),fe=new W,W[w]=null,fe[U]=ce):fe=de(),void 0===se?fe:a.f(fe,se)}},25913:(E,C,s)=>{"use strict";var r=s(49642),a=s(47960),c=s(48011),u=s(64562),e=s(81010),f=s(28474);C.f=r&&!a?Object.defineProperties:function(T,M){u(T);for(var $,w=e(M),D=f(M),U=D.length,W=0;U>W;)c.f(T,$=D[W++],w[$]);return T}},48011:(E,C,s)=>{"use strict";var r=s(49642),a=s(50495),c=s(47960),u=s(64562),e=s(62939),f=TypeError,m=Object.defineProperty,T=Object.getOwnPropertyDescriptor,M="enumerable",w="configurable",D="writable";C.f=r?c?function(W,$,J){if(u(W),$=e($),u(J),"function"==typeof W&&"prototype"===$&&"value"in J&&D in J&&!J[D]){var F=T(W,$);F&&F[D]&&(W[$]=J.value,J={configurable:w in J?J[w]:F[w],enumerable:M in J?J[M]:F[M],writable:!1})}return m(W,$,J)}:m:function(W,$,J){if(u(W),$=e($),u(J),a)try{return m(W,$,J)}catch{}if("get"in J||"set"in J)throw f("Accessors not supported");return"value"in J&&(W[$]=J.value),W}},25525:(E,C,s)=>{"use strict";var r=s(49642),a=s(25401),c=s(25558),u=s(51361),e=s(81010),f=s(62939),m=s(80112),T=s(50495),M=Object.getOwnPropertyDescriptor;C.f=r?M:function(D,U){if(D=e(D),U=f(U),T)try{return M(D,U)}catch{}if(m(D,U))return u(!a(c.f,D,U),D[U])}},62469:(E,C,s)=>{"use strict";var r=s(49806),a=s(81010),c=s(51518).f,u=s(8681),e="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];E.exports.f=function(T){return e&&"Window"===r(T)?function(m){try{return c(m)}catch{return u(e)}}(T):c(a(T))}},51518:(E,C,s)=>{"use strict";var r=s(66250),c=s(44939).concat("length","prototype");C.f=Object.getOwnPropertyNames||function(e){return r(e,c)}},47238:(E,C)=>{"use strict";C.f=Object.getOwnPropertySymbols},31426:(E,C,s)=>{"use strict";var r=s(80112),a=s(52208),c=s(70267),u=s(86066),e=s(37112),f=u("IE_PROTO"),m=Object,T=m.prototype;E.exports=e?m.getPrototypeOf:function(M){var w=c(M);if(r(w,f))return w[f];var D=w.constructor;return a(D)&&w instanceof D?D.prototype:w instanceof m?T:null}},46401:(E,C,s)=>{"use strict";var r=s(55756),a=s(77293),c=s(49806),u=s(76318),e=Object.isExtensible,f=r(function(){e(1)});E.exports=f||u?function(T){return!(!a(T)||u&&"ArrayBuffer"===c(T))&&(!e||e(T))}:e},23336:(E,C,s)=>{"use strict";var r=s(23634);E.exports=r({}.isPrototypeOf)},66250:(E,C,s)=>{"use strict";var r=s(23634),a=s(80112),c=s(81010),u=s(95171).indexOf,e=s(45599),f=r([].push);E.exports=function(m,T){var U,M=c(m),w=0,D=[];for(U in M)!a(e,U)&&a(M,U)&&f(D,U);for(;T.length>w;)a(M,U=T[w++])&&(~u(D,U)||f(D,U));return D}},28474:(E,C,s)=>{"use strict";var r=s(66250),a=s(44939);E.exports=Object.keys||function(u){return r(u,a)}},25558:(E,C)=>{"use strict";var s={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,a=r&&!s.call({1:2},1);C.f=a?function(u){var e=r(this,u);return!!e&&e.enumerable}:s},54945:(E,C,s)=>{"use strict";var
r=s(13325),a=s(64562),c=s(93221);E.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var f,u=!1,e={};try{(f=r(Object.prototype,"__proto__","set"))(e,[]),u=e instanceof Array}catch{}return function(T,M){return a(T),c(M),u?f(T,M):T.__proto__=M,T}}():void 0)},36805:(E,C,s)=>{"use strict";var r=s(49642),a=s(55756),c=s(23634),u=s(31426),e=s(28474),f=s(81010),T=c(s(25558).f),M=c([].push),w=r&&a(function(){var U=Object.create(null);return U[2]=2,!T(U,2)}),D=function(U){return function(W){for(var ce,$=f(W),J=e($),F=w&&null===u($),X=J.length,de=0,V=[];X>de;)ce=J[de++],(!r||(F?ce in $:T($,ce)))&&M(V,U?[ce,$[ce]]:$[ce]);return V}};E.exports={entries:D(!0),values:D(!1)}},97686:(E,C,s)=>{"use strict";var r=s(5552),a=s(35329);E.exports=r?{}.toString:function(){return"[object "+a(this)+"]"}},71689:(E,C,s)=>{"use strict";var r=s(25401),a=s(52208),c=s(77293),u=TypeError;E.exports=function(e,f){var m,T;if("string"===f&&a(m=e.toString)&&!c(T=r(m,e))||a(m=e.valueOf)&&!c(T=r(m,e))||"string"!==f&&a(m=e.toString)&&!c(T=r(m,e)))return T;throw u("Can't convert object to primitive value")}},59823:(E,C,s)=>{"use strict";var r=s(7365),a=s(23634),c=s(51518),u=s(47238),e=s(64562),f=a([].concat);E.exports=r("Reflect","ownKeys")||function(T){var M=c.f(e(T)),w=u.f;return w?f(M,w(T)):M}},13544:E=>{"use strict";E.exports={}},26975:E=>{"use strict";E.exports=function(C){try{return{error:!1,value:C()}}catch(s){return{error:!0,value:s}}}},9936:(E,C,s)=>{"use strict";var r=s(70009),a=s(46456),c=s(52208),u=s(79482),e=s(26699),f=s(91840),m=s(34008),T=s(31813),M=s(81124),w=s(63556),D=a&&a.prototype,U=f("species"),W=!1,$=c(r.PromiseRejectionEvent),J=u("Promise",function(){var F=e(a),X=F!==String(a);if(!X&&66===w||M&&(!D.catch||!D.finally))return!0;if(!w||w<51||!/native code/.test(F)){var de=new a(function(se){se(1)}),V=function(se){se(function(){},function(){})};if((de.constructor={})[U]=V,!(W=de.then(function(){})instanceof V))return!0}return!X&&(m||T)&&!$});E.exports={CONSTRUCTOR:J,REJECTION_EVENT:$,SUBCLASSING:W}},46456:(E,C,s)=>{"use strict";var r=s(70009);E.exports=r.Promise},25524:(E,C,s)=>{"use strict";var r=s(64562),a=s(77293),c=s(54256);E.exports=function(u,e){if(r(u),a(e)&&e.constructor===u)return e;var f=c.f(u);return(0,f.resolve)(e),f.promise}},95758:(E,C,s)=>{"use strict";var r=s(46456),a=s(5253),c=s(9936).CONSTRUCTOR;E.exports=c||!a(function(u){r.all(u).then(void 0,function(){})})},70918:E=>{"use strict";var C=function(){this.head=null,this.tail=null};C.prototype={add:function(s){var r={item:s,next:null},a=this.tail;a?a.next=r:this.head=r,this.tail=r},get:function(){var s=this.head;if(s)return null===(this.head=s.next)&&(this.tail=null),s.item}},E.exports=C},67917:(E,C,s)=>{"use strict";var r=s(43550),a=TypeError;E.exports=function(c){if(r(c))throw a("Can't call method on "+c);return c}},29627:E=>{"use strict";E.exports=function(C,s){return C===s||C!=C&&s!=s}},53814:(E,C,s)=>{"use strict";var w,r=s(70009),a=s(2543),c=s(52208),u=s(70902),e=s(86053),f=s(37591),m=s(15086),T=r.Function,M=/MSIE .\./.test(e)||u&&((w=r.Bun.version.split(".")).length<3||"0"===w[0]&&(w[1]<3||"3"===w[1]&&"0"===w[2]));E.exports=function(w,D){var U=D?2:1;return M?function(W,$){var J=m(arguments.length,1)>U,F=c(W)?W:T(W),X=J?f(arguments,U):[],de=J?function(){a(F,this,X)}:F;return D?w(de,$):w(de)}:w}},58014:(E,C,s)=>{"use strict";var r=s(7365),a=s(1707),c=s(91840),u=s(49642),e=c("species");E.exports=function(f){var m=r(f);u&&m&&!m[e]&&a(m,e,{configurable:!0,get:function(){return this}})}},85681:(E,C,s)=>{"use strict";var 
r=s(5552),a=s(48011).f,c=s(65162),u=s(80112),e=s(97686),m=s(91840)("toStringTag");E.exports=function(T,M,w,D){if(T){var U=w?T:T.prototype;u(U,m)||a(U,m,{configurable:!0,value:M}),D&&!r&&c(U,"toString",e)}}},86066:(E,C,s)=>{"use strict";var r=s(64579),a=s(13708),c=r("keys");E.exports=function(u){return c[u]||(c[u]=a(u))}},24766:(E,C,s)=>{"use strict";var r=s(70009),a=s(34056),c="__core-js_shared__",u=r[c]||a(c,{});E.exports=u},64579:(E,C,s)=>{"use strict";var r=s(81124),a=s(24766);(E.exports=function(c,u){return a[c]||(a[c]=void 0!==u?u:{})})("versions",[]).push({version:"3.32.2",mode:r?"pure":"global",copyright:"\xa9 2014-2023 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.32.2/LICENSE",source:"https://github.com/zloirock/core-js"})},95869:(E,C,s)=>{"use strict";var r=s(64562),a=s(54356),c=s(43550),e=s(91840)("species");E.exports=function(f,m){var M,T=r(f).constructor;return void 0===T||c(M=r(T)[e])?m:a(M)}},61557:(E,C,s)=>{"use strict";var r=s(23634),a=s(33912),c=s(41433),u=s(67917),e=r("".charAt),f=r("".charCodeAt),m=r("".slice),T=function(M){return function(w,D){var J,F,U=c(u(w)),W=a(D),$=U.length;return W<0||W>=$?M?"":void 0:(J=f(U,W))<55296||J>56319||W+1===$||(F=f(U,W+1))<56320||F>57343?M?e(U,W):J:M?m(U,W,W+2):F-56320+(J-55296<<10)+65536}};E.exports={codeAt:T(!1),charAt:T(!0)}},26662:(E,C,s)=>{"use strict";var r=s(23634),a=2147483647,D=/[^\0-\u007E]/,U=/[.\u3002\uFF0E\uFF61]/g,W="Overflow: input needs wider integers to process",J=RangeError,F=r(U.exec),X=Math.floor,de=String.fromCharCode,V=r("".charCodeAt),ce=r([].join),se=r([].push),fe=r("".replace),Te=r("".split),$e=r("".toLowerCase),Et=function(qe){return qe+22+75*(qe<26)},ot=function(qe,He,We){var Le=0;for(qe=We?X(qe/700):qe>>1,qe+=X(qe/He);qe>455;)qe=X(qe/35),Le+=36;return X(Le+36*qe/(qe+38))},ct=function(qe){var He=[];qe=function(qe){for(var He=[],We=0,Le=qe.length;We=55296&&Pt<=56319&&We=Le&&cnX((a-Pt)/qt))throw J(W);for(Pt+=(At-Le)*qt,Le=At,Xt=0;Xta)throw J(W);if(cn===Le){for(var sn=Pt,fn=36;;){var xn=fn<=it?1:fn>=it+26?26:fn-it;if(sn{"use strict";var r=s(33912),a=s(41433),c=s(67917),u=RangeError;E.exports=function(f){var m=a(c(this)),T="",M=r(f);if(M<0||M===1/0)throw u("Wrong number of repetitions");for(;M>0;(M>>>=1)&&(m+=m))1&M&&(T+=m);return T}},85462:(E,C,s)=>{"use strict";var r=s(29862).PROPER,a=s(55756),c=s(88185);E.exports=function(e){return a(function(){return!!c[e]()||"\u200b\x85\u180e"!=="\u200b\x85\u180e"[e]()||r&&c[e].name!==e})}},89858:(E,C,s)=>{"use strict";var r=s(23634),a=s(67917),c=s(41433),u=s(88185),e=r("".replace),f=RegExp("^["+u+"]+"),m=RegExp("(^|[^"+u+"])["+u+"]+$"),T=function(M){return function(w){var D=c(a(w));return 1&M&&(D=e(D,f,"")),2&M&&(D=e(D,m,"$1")),D}};E.exports={start:T(1),end:T(2),trim:T(3)}},98535:(E,C,s)=>{"use strict";var r=s(63556),a=s(55756),u=s(70009).String;E.exports=!!Object.getOwnPropertySymbols&&!a(function(){var e=Symbol("symbol detection");return!u(e)||!(Object(e)instanceof Symbol)||!Symbol.sham&&r&&r<41})},56992:(E,C,s)=>{"use strict";var r=s(25401),a=s(7365),c=s(91840),u=s(42915);E.exports=function(){var e=a("Symbol"),f=e&&e.prototype,m=f&&f.valueOf,T=c("toPrimitive");f&&!f[T]&&u(f,T,function(M){return r(m,this)},{arity:1})}},86475:(E,C,s)=>{"use strict";var r=s(7365),a=s(23634),c=r("Symbol"),u=c.keyFor,e=a(c.prototype.valueOf);E.exports=c.isRegisteredSymbol||function(m){try{return void 0!==u(e(m))}catch{return!1}}},74110:(E,C,s)=>{"use strict";for(var 
r=s(64579),a=s(7365),c=s(23634),u=s(74717),e=s(91840),f=a("Symbol"),m=f.isWellKnownSymbol,T=a("Object","getOwnPropertyNames"),M=c(f.prototype.valueOf),w=r("wks"),D=0,U=T(f),W=U.length;D{"use strict";var r=s(98535);E.exports=r&&!!Symbol.for&&!!Symbol.keyFor},37352:(E,C,s)=>{"use strict";var Te,$e,ge,Et,r=s(70009),a=s(2543),c=s(76781),u=s(52208),e=s(80112),f=s(55756),m=s(55690),T=s(37591),M=s(96682),w=s(15086),D=s(3877),U=s(3787),W=r.setImmediate,$=r.clearImmediate,J=r.process,F=r.Dispatch,X=r.Function,de=r.MessageChannel,V=r.String,ce=0,se={},fe="onreadystatechange";f(function(){Te=r.location});var ot=function(We){if(e(se,We)){var Le=se[We];delete se[We],Le()}},ct=function(We){return function(){ot(We)}},qe=function(We){ot(We.data)},He=function(We){r.postMessage(V(We),Te.protocol+"//"+Te.host)};(!W||!$)&&(W=function(Le){w(arguments.length,1);var Pt=u(Le)?Le:X(Le),it=T(arguments,1);return se[++ce]=function(){a(Pt,void 0,it)},$e(ce),ce},$=function(Le){delete se[Le]},U?$e=function(We){J.nextTick(ct(We))}:F&&F.now?$e=function(We){F.now(ct(We))}:de&&!D?(Et=(ge=new de).port2,ge.port1.onmessage=qe,$e=c(Et.postMessage,Et)):r.addEventListener&&u(r.postMessage)&&!r.importScripts&&Te&&"file:"!==Te.protocol&&!f(He)?($e=He,r.addEventListener("message",qe,!1)):$e=fe in M("script")?function(We){m.appendChild(M("script"))[fe]=function(){m.removeChild(this),ot(We)}}:function(We){setTimeout(ct(We),0)}),E.exports={set:W,clear:$}},19401:(E,C,s)=>{"use strict";var r=s(33912),a=Math.max,c=Math.min;E.exports=function(u,e){var f=r(u);return f<0?a(f+e,0):c(f,e)}},81010:(E,C,s)=>{"use strict";var r=s(20973),a=s(67917);E.exports=function(c){return r(a(c))}},33912:(E,C,s)=>{"use strict";var r=s(8651);E.exports=function(a){var c=+a;return c!=c||0===c?0:r(c)}},48869:(E,C,s)=>{"use strict";var r=s(33912),a=Math.min;E.exports=function(c){return c>0?a(r(c),9007199254740991):0}},70267:(E,C,s)=>{"use strict";var r=s(67917),a=Object;E.exports=function(c){return a(r(c))}},1645:(E,C,s)=>{"use strict";var r=s(25401),a=s(77293),c=s(74717),u=s(34778),e=s(71689),f=s(91840),m=TypeError,T=f("toPrimitive");E.exports=function(M,w){if(!a(M)||c(M))return M;var U,D=u(M,T);if(D){if(void 0===w&&(w="default"),U=r(D,M,w),!a(U)||c(U))return U;throw m("Can't convert object to primitive value")}return void 0===w&&(w="number"),e(M,w)}},62939:(E,C,s)=>{"use strict";var r=s(1645),a=s(74717);E.exports=function(c){var u=r(c,"string");return a(u)?u:u+""}},5552:(E,C,s)=>{"use strict";var c={};c[s(91840)("toStringTag")]="z",E.exports="[object z]"===String(c)},41433:(E,C,s)=>{"use strict";var r=s(35329),a=String;E.exports=function(c){if("Symbol"===r(c))throw TypeError("Cannot convert a Symbol value to a string");return a(c)}},7378:E=>{"use strict";var C=String;E.exports=function(s){try{return C(s)}catch{return"Object"}}},13708:(E,C,s)=>{"use strict";var r=s(23634),a=0,c=Math.random(),u=r(1..toString);E.exports=function(e){return"Symbol("+(void 0===e?"":e)+")_"+u(++a+c,36)}},54933:(E,C,s)=>{"use strict";var r=s(55756),a=s(91840),c=s(49642),u=s(81124),e=a("iterator");E.exports=!r(function(){var f=new URL("b?a=1&b=2&c=3","http://a"),m=f.searchParams,T=new URLSearchParams("a=1&a=2&b=3"),M="";return f.pathname="c%20d",m.forEach(function(w,D){m.delete("b"),M+=D+w}),T.delete("a",2),T.delete("b",void 0),u&&(!f.toJSON||!T.has("a",1)||T.has("a",2)||!T.has("a",void 0)||T.has("b"))||!m.size&&(u||!c)||!m.sort||"http://a/c%20d?a=1&c=3"!==f.href||"3"!==m.get("c")||"a=1"!==String(new URLSearchParams("?a=1"))||!m[e]||"a"!==new URL("https://a@b").username||"b"!==new 
URLSearchParams(new URLSearchParams("a=b")).get("a")||"xn--e1aybc"!==new URL("http://\u0442\u0435\u0441\u0442").host||"#%D0%B1"!==new URL("http://a#\u0431").hash||"a1c3"!==M||"x"!==new URL("http://x",void 0).host})},99554:(E,C,s)=>{"use strict";var r=s(98535);E.exports=r&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},47960:(E,C,s)=>{"use strict";var r=s(49642),a=s(55756);E.exports=r&&a(function(){return 42!==Object.defineProperty(function(){},"prototype",{value:42,writable:!1}).prototype})},15086:E=>{"use strict";var C=TypeError;E.exports=function(s,r){if(s{"use strict";var r=s(70009),a=s(52208),c=r.WeakMap;E.exports=a(c)&&/native code/.test(String(c))},25374:(E,C,s)=>{"use strict";var r=s(13544),a=s(80112),c=s(89734),u=s(48011).f;E.exports=function(e){var f=r.Symbol||(r.Symbol={});a(f,e)||u(f,e,{value:c.f(e)})}},89734:(E,C,s)=>{"use strict";var r=s(91840);C.f=r},91840:(E,C,s)=>{"use strict";var r=s(70009),a=s(64579),c=s(80112),u=s(13708),e=s(98535),f=s(99554),m=r.Symbol,T=a("wks"),M=f?m.for||m:m&&m.withoutSetter||u;E.exports=function(w){return c(T,w)||(T[w]=e&&c(m,w)?m[w]:M("Symbol."+w)),T[w]}},88185:E=>{"use strict";E.exports="\t\n\v\f\r \xa0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029\ufeff"},70210:(E,C,s)=>{"use strict";var r=s(90513),a=s(23336),c=s(31426),u=s(54945),e=s(65031),f=s(83272),m=s(65162),T=s(51361),M=s(33411),w=s(77732),D=s(41605),U=s(63313),$=s(91840)("toStringTag"),J=Error,F=[].push,X=function(ce,se){var Te,fe=a(de,this);u?Te=u(J(),fe?c(this):de):(Te=fe?this:f(de),m(Te,$,"Error")),void 0!==se&&m(Te,"message",U(se)),w(Te,X,Te.stack,1),arguments.length>2&&M(Te,arguments[2]);var $e=[];return D(ce,F,{that:$e}),m(Te,"errors",$e),Te};u?u(X,J):e(X,J,{name:!0});var de=X.prototype=f(J.prototype,{constructor:T(1,X),message:T(1,""),name:T(1,"AggregateError")});r({global:!0,constructor:!0,arity:2},{AggregateError:X})},10901:(E,C,s)=>{"use strict";s(70210)},1625:(E,C,s)=>{"use strict";var r=s(90513),a=s(55756),c=s(89735),u=s(77293),e=s(70267),f=s(6381),m=s(11594),T=s(46751),M=s(2103),w=s(95913),D=s(91840),U=s(63556),W=D("isConcatSpreadable"),$=U>=51||!a(function(){var X=[];return X[W]=!1,X.concat()[0]!==X}),J=function(X){if(!u(X))return!1;var de=X[W];return void 0!==de?!!de:c(X)};r({target:"Array",proto:!0,arity:1,forced:!$||!w("concat")},{concat:function(de){var fe,Te,$e,ge,Et,V=e(this),ce=M(V,0),se=0;for(fe=-1,$e=arguments.length;fe<$e;fe++)if(J(Et=-1===fe?V:arguments[fe]))for(ge=f(Et),m(se+ge),Te=0;Te{"use strict";var r=s(90513),a=s(68607).every;r({target:"Array",proto:!0,forced:!s(33620)("every")},{every:function(f){return a(this,f,arguments.length>1?arguments[1]:void 0)}})},24990:(E,C,s)=>{"use strict";var r=s(90513),a=s(35277),c=s(82196);r({target:"Array",proto:!0},{fill:a}),c("fill")},56534:(E,C,s)=>{"use strict";var r=s(90513),a=s(68607).filter;r({target:"Array",proto:!0,forced:!s(95913)("filter")},{filter:function(f){return a(this,f,arguments.length>1?arguments[1]:void 0)}})},12773:(E,C,s)=>{"use strict";var r=s(90513),a=s(68607).findIndex,c=s(82196),u="findIndex",e=!0;u in[]&&Array(1)[u](function(){e=!1}),r({target:"Array",proto:!0,forced:e},{findIndex:function(m){return a(this,m,arguments.length>1?arguments[1]:void 0)}}),c(u)},60326:(E,C,s)=>{"use strict";var r=s(90513),a=s(68607).find,c=s(82196),u="find",e=!0;u in[]&&Array(1)[u](function(){e=!1}),r({target:"Array",proto:!0,forced:e},{find:function(m){return a(this,m,arguments.length>1?arguments[1]:void 0)}}),c(u)},98792:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(8366);r({target:"Array",proto:!0,forced:[].forEach!==a},{forEach:a})},261:(E,C,s)=>{"use strict";var r=s(90513),a=s(51923);r({target:"Array",stat:!0,forced:!s(5253)(function(e){Array.from(e)})},{from:a})},77059:(E,C,s)=>{"use strict";var r=s(90513),a=s(95171).includes,c=s(55756),u=s(82196);r({target:"Array",proto:!0,forced:c(function(){return!Array(1).includes()})},{includes:function(m){return a(this,m,arguments.length>1?arguments[1]:void 0)}}),u("includes")},2795:(E,C,s)=>{"use strict";var r=s(90513),a=s(64350),c=s(95171).indexOf,u=s(33620),e=a([].indexOf),f=!!e&&1/e([1],1,-0)<0;r({target:"Array",proto:!0,forced:f||!u("indexOf")},{indexOf:function(M){var w=arguments.length>1?arguments[1]:void 0;return f?e(this,M,w)||0:c(this,M,w)}})},2862:(E,C,s)=>{"use strict";s(90513)({target:"Array",stat:!0},{isArray:s(89735)})},1285:(E,C,s)=>{"use strict";var r=s(81010),a=s(82196),c=s(84394),u=s(91093),e=s(48011).f,f=s(79077),m=s(28738),T=s(81124),M=s(49642),w="Array Iterator",D=u.set,U=u.getterFor(w);E.exports=f(Array,"Array",function($,J){D(this,{type:w,target:r($),index:0,kind:J})},function(){var $=U(this),J=$.target,F=$.kind,X=$.index++;if(!J||X>=J.length)return $.target=void 0,m(void 0,!0);switch(F){case"keys":return m(X,!1);case"values":return m(J[X],!1)}return m([X,J[X]],!1)},"values");var W=c.Arguments=c.Array;if(a("keys"),a("values"),a("entries"),!T&&M&&"values"!==W.name)try{e(W,"name",{value:"values"})}catch{}},74926:(E,C,s)=>{"use strict";var r=s(90513),a=s(78375);r({target:"Array",proto:!0,forced:a!==[].lastIndexOf},{lastIndexOf:a})},88119:(E,C,s)=>{"use strict";var r=s(90513),a=s(68607).map;r({target:"Array",proto:!0,forced:!s(95913)("map")},{map:function(f){return a(this,f,arguments.length>1?arguments[1]:void 0)}})},93870:(E,C,s)=>{"use strict";var r=s(90513),a=s(70267),c=s(6381),u=s(54716),e=s(11594);r({target:"Array",proto:!0,arity:1,forced:s(55756)(function(){return 4294967297!==[].push.call({length:4294967296},1)})||!function(){try{Object.defineProperty([],"length",{writable:!1}).push()}catch(w){return w instanceof TypeError}}()},{push:function(D){var U=a(this),W=c(U),$=arguments.length;e(W+$);for(var J=0;J<$;J++)U[W]=arguments[J],W++;return u(U,W),W}})},46250:(E,C,s)=>{"use strict";var r=s(90513),a=s(88908).left,c=s(33620),u=s(63556);r({target:"Array",proto:!0,forced:!s(3787)&&u>79&&u<83||!c("reduce")},{reduce:function(M){var w=arguments.length;return a(this,M,w,w>1?arguments[1]:void 0)}})},32836:(E,C,s)=>{"use strict";var r=s(90513),a=s(23634),c=s(89735),u=a([].reverse),e=[1,2];r({target:"Array",proto:!0,forced:String(e)===String(e.reverse())},{reverse:function(){return c(this)&&(this.length=this.length),u(this)}})},72999:(E,C,s)=>{"use strict";var r=s(90513),a=s(89735),c=s(81177),u=s(77293),e=s(19401),f=s(6381),m=s(81010),T=s(46751),M=s(91840),w=s(95913),D=s(37591),U=w("slice"),W=M("species"),$=Array,J=Math.max;r({target:"Array",proto:!0,forced:!U},{slice:function(X,de){var Te,$e,ge,V=m(this),ce=f(V),se=e(X,ce),fe=e(void 0===de?ce:de,ce);if(a(V)&&((c(Te=V.constructor)&&(Te===$||a(Te.prototype))||u(Te)&&null===(Te=Te[W]))&&(Te=void 0),Te===$||void 0===Te))return D(V,se,fe);for($e=new(void 0===Te?$:Te)(J(fe-se,0)),ge=0;se{"use strict";var r=s(90513),a=s(68607).some;r({target:"Array",proto:!0,forced:!s(33620)("some")},{some:function(f){return a(this,f,arguments.length>1?arguments[1]:void 0)}})},93639:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(23634),c=s(61812),u=s(70267),e=s(6381),f=s(67236),m=s(41433),T=s(55756),M=s(84865),w=s(33620),D=s(36410),U=s(5329),W=s(63556),$=s(34545),J=[],F=a(J.sort),X=a(J.push),de=T(function(){J.sort(void 0)}),V=T(function(){J.sort(null)}),ce=w("sort"),se=!T(function(){if(W)return W<70;if(!(D&&D>3)){if(U)return!0;if($)return $<603;var ge,Et,ot,ct,$e="";for(ge=65;ge<76;ge++){switch(Et=String.fromCharCode(ge),ge){case 66:case 69:case 70:case 72:ot=3;break;case 68:case 71:ot=4;break;default:ot=2}for(ct=0;ct<47;ct++)J.push({k:Et+ct,v:ot})}for(J.sort(function(qe,He){return He.v-qe.v}),ct=0;ctm(Et)?1:-1}}(ge)),qe=e(ot),He=0;He{"use strict";var r=s(90513),a=s(70267),c=s(19401),u=s(33912),e=s(6381),f=s(54716),m=s(11594),T=s(2103),M=s(46751),w=s(67236),U=s(95913)("splice"),W=Math.max,$=Math.min;r({target:"Array",proto:!0,forced:!U},{splice:function(F,X){var fe,Te,$e,ge,Et,ot,de=a(this),V=e(de),ce=c(F,V),se=arguments.length;for(0===se?fe=Te=0:1===se?(fe=0,Te=V-ce):(fe=se-2,Te=$(W(u(X),0),V-ce)),m(V+fe-Te),$e=T(de,Te),ge=0;geV-Te+fe;ge--)w(de,ge-1)}else if(fe>Te)for(ge=V-Te;ge>ce;ge--)ot=ge+fe-1,(Et=ge+Te-1)in de?de[ot]=de[Et]:w(de,ot);for(ge=0;ge{"use strict";var r=s(90513),a=s(23634),c=Date,u=a(c.prototype.getTime);r({target:"Date",stat:!0},{now:function(){return u(new c)}})},68154:()=>{},33379:(E,C,s)=>{"use strict";var r=s(90513),a=s(44197);r({target:"Function",proto:!0,forced:Function.bind!==a},{bind:a})},75071:(E,C,s)=>{"use strict";var r=s(90513),a=s(7365),c=s(2543),u=s(25401),e=s(23634),f=s(55756),m=s(52208),T=s(74717),M=s(37591),w=s(32092),D=s(98535),U=String,W=a("JSON","stringify"),$=e(/./.exec),J=e("".charAt),F=e("".charCodeAt),X=e("".replace),de=e(1..toString),V=/[\uD800-\uDFFF]/g,ce=/^[\uD800-\uDBFF]$/,se=/^[\uDC00-\uDFFF]$/,fe=!D||f(function(){var Et=a("Symbol")("stringify detection");return"[null]"!==W([Et])||"{}"!==W({a:Et})||"{}"!==W(Object(Et))}),Te=f(function(){return'"\\udf06\\ud834"'!==W("\udf06\ud834")||'"\\udead"'!==W("\udead")}),$e=function(Et,ot){var ct=M(arguments),qe=w(ot);if(m(qe)||void 0!==Et&&!T(Et))return ct[1]=function(He,We){if(m(qe)&&(We=u(qe,this,U(He),We)),!T(We))return We},c(W,null,ct)},ge=function(Et,ot,ct){var qe=J(ct,ot-1),He=J(ct,ot+1);return $(ce,Et)&&!$(se,He)||$(se,Et)&&!$(ce,qe)?"\\u"+de(F(Et,0),16):Et};W&&r({target:"JSON",stat:!0,arity:3,forced:fe||Te},{stringify:function(ot,ct,qe){var He=M(arguments),We=c(fe?$e:W,null,He);return Te&&"string"==typeof We?X(We,V,ge):We}})},32300:(E,C,s)=>{"use strict";var r=s(70009);s(85681)(r.JSON,"JSON",!0)},83616:(E,C,s)=>{"use strict";s(85116)("Map",function(c){return function(){return c(this,arguments.length?arguments[0]:void 0)}},s(26650))},85140:(E,C,s)=>{"use strict";s(83616)},63603:()=>{},67234:(E,C,s)=>{"use strict";var r=s(90513),a=s(75791);r({target:"Object",stat:!0,arity:2,forced:Object.assign!==a},{assign:a})},86516:(E,C,s)=>{"use strict";s(90513)({target:"Object",stat:!0,sham:!s(49642)},{create:s(83272)})},36255:(E,C,s)=>{"use strict";var r=s(90513),a=s(49642),c=s(25913).f;r({target:"Object",stat:!0,forced:Object.defineProperties!==c,sham:!a},{defineProperties:c})},84468:(E,C,s)=>{"use strict";var r=s(90513),a=s(49642),c=s(48011).f;r({target:"Object",stat:!0,forced:Object.defineProperty!==c,sham:!a},{defineProperty:c})},86627:(E,C,s)=>{"use strict";var r=s(90513),a=s(55756),c=s(81010),u=s(25525).f,e=s(49642);r({target:"Object",stat:!0,forced:!e||a(function(){u(1)}),sham:!e},{getOwnPropertyDescriptor:function(T,M){return u(c(T),M)}})},78275:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(49642),c=s(59823),u=s(81010),e=s(25525),f=s(46751);r({target:"Object",stat:!0,sham:!a},{getOwnPropertyDescriptors:function(T){for(var $,J,M=u(T),w=e.f,D=c(M),U={},W=0;D.length>W;)void 0!==(J=w(M,$=D[W++]))&&f(U,$,J);return U}})},37764:(E,C,s)=>{"use strict";var r=s(90513),a=s(98535),c=s(55756),u=s(47238),e=s(70267);r({target:"Object",stat:!0,forced:!a||c(function(){u.f(1)})},{getOwnPropertySymbols:function(T){var M=u.f;return M?M(e(T)):[]}})},31193:(E,C,s)=>{"use strict";var r=s(90513),a=s(55756),c=s(70267),u=s(31426),e=s(37112);r({target:"Object",stat:!0,forced:a(function(){u(1)}),sham:!e},{getPrototypeOf:function(T){return u(c(T))}})},56557:(E,C,s)=>{"use strict";var r=s(90513),a=s(70267),c=s(28474);r({target:"Object",stat:!0,forced:s(55756)(function(){c(1)})},{keys:function(m){return c(a(m))}})},17971:(E,C,s)=>{"use strict";s(90513)({target:"Object",stat:!0},{setPrototypeOf:s(54945)})},17221:()=>{},88923:(E,C,s)=>{"use strict";var r=s(90513),a=s(36805).values;r({target:"Object",stat:!0},{values:function(u){return a(u)}})},84798:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(61812),u=s(54256),e=s(26975),f=s(41605);r({target:"Promise",stat:!0,forced:s(95758)},{allSettled:function(M){var w=this,D=u.f(w),U=D.resolve,W=D.reject,$=e(function(){var J=c(w.resolve),F=[],X=0,de=1;f(M,function(V){var ce=X++,se=!1;de++,a(J,w,V).then(function(fe){se||(se=!0,F[ce]={status:"fulfilled",value:fe},--de||U(F))},function(fe){se||(se=!0,F[ce]={status:"rejected",reason:fe},--de||U(F))})}),--de||U(F)});return $.error&&W($.value),D.promise}})},58085:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(61812),u=s(54256),e=s(26975),f=s(41605);r({target:"Promise",stat:!0,forced:s(95758)},{all:function(M){var w=this,D=u.f(w),U=D.resolve,W=D.reject,$=e(function(){var J=c(w.resolve),F=[],X=0,de=1;f(M,function(V){var ce=X++,se=!1;de++,a(J,w,V).then(function(fe){se||(se=!0,F[ce]=fe,--de||U(F))},W)}),--de||U(F)});return $.error&&W($.value),D.promise}})},98857:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(61812),u=s(7365),e=s(54256),f=s(26975),m=s(41605),T=s(95758),M="No one promise resolved";r({target:"Promise",stat:!0,forced:T},{any:function(D){var U=this,W=u("AggregateError"),$=e.f(U),J=$.resolve,F=$.reject,X=f(function(){var de=c(U.resolve),V=[],ce=0,se=1,fe=!1;m(D,function(Te){var $e=ce++,ge=!1;se++,a(de,U,Te).then(function(Et){ge||fe||(fe=!0,J(Et))},function(Et){ge||fe||(ge=!0,V[$e]=Et,--se||F(new W(V,M)))})}),--se||F(new W(V,M))});return X.error&&F(X.value),$.promise}})},5846:(E,C,s)=>{"use strict";var r=s(90513),a=s(81124),c=s(9936).CONSTRUCTOR,u=s(46456),e=s(7365),f=s(52208),m=s(42915),T=u&&u.prototype;if(r({target:"Promise",proto:!0,forced:c,real:!0},{catch:function(w){return this.then(void 0,w)}}),!a&&f(u)){var M=e("Promise").prototype.catch;T.catch!==M&&m(T,"catch",M,{unsafe:!0})}},38206:(E,C,s)=>{"use strict";var Lr,ir,jr,r=s(90513),a=s(81124),c=s(3787),u=s(70009),e=s(25401),f=s(42915),m=s(54945),T=s(85681),M=s(58014),w=s(61812),D=s(52208),U=s(77293),W=s(54849),$=s(95869),J=s(37352).set,F=s(53460),X=s(52912),de=s(26975),V=s(70918),ce=s(91093),se=s(46456),fe=s(9936),Te=s(54256),$e="Promise",ge=fe.CONSTRUCTOR,Et=fe.REJECTION_EVENT,ot=fe.SUBCLASSING,ct=ce.getterFor($e),qe=ce.set,He=se&&se.prototype,We=se,Le=He,Pt=u.TypeError,it=u.document,Xt=u.process,cn=Te.f,pn=cn,Rn=!!(it&&it.createEvent&&u.dispatchEvent),At="unhandledrejection",br=function(kr){var Ei;return!(!U(kr)||!D(Ei=kr.then))&&Ei},ht=function(kr,Ei){var 
qr,Hi,Dn,ii=Ei.value,mr=1===Ei.state,pr=mr?kr.ok:kr.fail,Eo=kr.resolve,po=kr.reject,$i=kr.domain;try{pr?(mr||(2===Ei.rejection&&hr(Ei),Ei.rejection=1),!0===pr?qr=ii:($i&&$i.enter(),qr=pr(ii),$i&&($i.exit(),Dn=!0)),qr===kr.promise?po(Pt("Promise-chain cycle")):(Hi=br(qr))?e(Hi,qr,Eo,po):Eo(qr)):po(ii)}catch(Hn){$i&&!Dn&&$i.exit(),po(Hn)}},Wt=function(kr,Ei){kr.notified||(kr.notified=!0,F(function(){for(var mr,ii=kr.reactions;mr=ii.get();)ht(mr,kr);kr.notified=!1,Ei&&!kr.rejection&&wn(kr)}))},Tt=function(kr,Ei,ii){var mr,pr;Rn?((mr=it.createEvent("Event")).promise=Ei,mr.reason=ii,mr.initEvent(kr,!1,!0),u.dispatchEvent(mr)):mr={promise:Ei,reason:ii},!Et&&(pr=u["on"+kr])?pr(mr):kr===At&&X("Unhandled promise rejection",ii)},wn=function(kr){e(J,u,function(){var pr,Ei=kr.facade,ii=kr.value;if(jn(kr)&&(pr=de(function(){c?Xt.emit("unhandledRejection",ii,Ei):Tt(At,Ei,ii)}),kr.rejection=c||jn(kr)?2:1,pr.error))throw pr.value})},jn=function(kr){return 1!==kr.rejection&&!kr.parent},hr=function(kr){e(J,u,function(){var Ei=kr.facade;c?Xt.emit("rejectionHandled",Ei):Tt("rejectionhandled",Ei,kr.value)})},Oi=function(kr,Ei,ii){return function(mr){kr(Ei,mr,ii)}},Wi=function(kr,Ei,ii){kr.done||(kr.done=!0,ii&&(kr=ii),kr.value=Ei,kr.state=2,Wt(kr,!0))},so=function(kr,Ei,ii){if(!kr.done){kr.done=!0,ii&&(kr=ii);try{if(kr.facade===Ei)throw Pt("Promise can't be resolved itself");var mr=br(Ei);mr?F(function(){var pr={done:!1};try{e(mr,Ei,Oi(so,pr,kr),Oi(Wi,pr,kr))}catch(Eo){Wi(pr,Eo,kr)}}):(kr.value=Ei,kr.state=1,Wt(kr,!1))}catch(pr){Wi({done:!1},pr,kr)}}};if(ge&&(We=function(Ei){W(this,Le),w(Ei),e(Lr,this);var ii=ct(this);try{Ei(Oi(so,ii),Oi(Wi,ii))}catch(mr){Wi(ii,mr)}},(Lr=function(Ei){qe(this,{type:$e,done:!1,notified:!1,parent:!1,reactions:new V,rejection:!1,state:0,value:void 0})}).prototype=f(Le=We.prototype,"then",function(Ei,ii){var mr=ct(this),pr=cn($(this,We));return mr.parent=!0,pr.ok=!D(Ei)||Ei,pr.fail=D(ii)&&ii,pr.domain=c?Xt.domain:void 0,0===mr.state?mr.reactions.add(pr):F(function(){ht(pr,mr)}),pr.promise}),ir=function(){var kr=new Lr,Ei=ct(kr);this.promise=kr,this.resolve=Oi(so,Ei),this.reject=Oi(Wi,Ei)},Te.f=cn=function(kr){return kr===We||void 0===kr?new ir(kr):pn(kr)},!a&&D(se)&&He!==Object.prototype)){jr=He.then,ot||f(He,"then",function(Ei,ii){var mr=this;return new We(function(pr,Eo){e(jr,mr,pr,Eo)}).then(Ei,ii)},{unsafe:!0});try{delete He.constructor}catch{}m&&m(He,Le)}r({global:!0,constructor:!0,wrap:!0,forced:ge},{Promise:We}),T(We,$e,!1,!0),M($e)},30185:(E,C,s)=>{"use strict";var r=s(90513),a=s(81124),c=s(46456),u=s(55756),e=s(7365),f=s(52208),m=s(95869),T=s(25524),M=s(42915),w=c&&c.prototype;if(r({target:"Promise",proto:!0,real:!0,forced:!!c&&u(function(){w.finally.call({then:function(){}},function(){})})},{finally:function(W){var $=m(this,e("Promise")),J=f(W);return this.then(J?function(F){return T($,W()).then(function(){return F})}:W,J?function(F){return T($,W()).then(function(){throw F})}:W)}}),!a&&f(c)){var U=e("Promise").prototype.finally;w.finally!==U&&M(w,"finally",U,{unsafe:!0})}},66793:(E,C,s)=>{"use strict";s(38206),s(58085),s(5846),s(44738),s(74767),s(4991)},44738:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(61812),u=s(54256),e=s(26975),f=s(41605);r({target:"Promise",stat:!0,forced:s(95758)},{race:function(M){var w=this,D=u.f(w),U=D.reject,W=e(function(){var $=c(w.resolve);f(M,function(J){a($,w,J).then(D.resolve,U)})});return W.error&&U(W.value),D.promise}})},74767:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(25401),c=s(54256);r({target:"Promise",stat:!0,forced:s(9936).CONSTRUCTOR},{reject:function(f){var m=c.f(this);return a(m.reject,void 0,f),m.promise}})},4991:(E,C,s)=>{"use strict";var r=s(90513),a=s(7365),c=s(81124),u=s(46456),e=s(9936).CONSTRUCTOR,f=s(25524),m=a("Promise"),T=c&&!e;r({target:"Promise",stat:!0,forced:c||e},{resolve:function(w){return f(T&&this===m?u:this,w)}})},19539:(E,C,s)=>{"use strict";var r=s(90513),a=s(7365),c=s(2543),u=s(44197),e=s(54356),f=s(64562),m=s(77293),T=s(83272),M=s(55756),w=a("Reflect","construct"),D=Object.prototype,U=[].push,W=M(function(){function F(){}return!(w(function(){},[],F)instanceof F)}),$=!M(function(){w(function(){})}),J=W||$;r({target:"Reflect",stat:!0,forced:J,sham:J},{construct:function(X,de){e(X),f(de);var V=arguments.length<3?X:e(arguments[2]);if($&&!W)return w(X,de,V);if(X===V){switch(de.length){case 0:return new X;case 1:return new X(de[0]);case 2:return new X(de[0],de[1]);case 3:return new X(de[0],de[1],de[2]);case 4:return new X(de[0],de[1],de[2],de[3])}var ce=[null];return c(U,ce,de),new(c(u,X,ce))}var se=V.prototype,fe=T(m(se)?se:D),Te=c(X,fe,de);return m(Te)?Te:fe}})},60851:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(77293),u=s(64562),e=s(27029),f=s(25525),m=s(31426);r({target:"Reflect",stat:!0},{get:function T(M,w){var U,W,D=arguments.length<3?M:arguments[2];return u(M)===D?M[w]:(U=f.f(M,w))?e(U)?U.value:void 0===U.get?void 0:a(U.get,D):c(W=m(M))?T(W,w,D):void 0}})},44864:()=>{},97764:(E,C,s)=>{"use strict";var r=s(90513),a=s(23634),c=s(56421),u=s(67917),e=s(41433),f=s(79668),m=a("".indexOf);r({target:"String",proto:!0,forced:!f("includes")},{includes:function(M){return!!~m(e(u(this)),e(c(M)),arguments.length>1?arguments[1]:void 0)}})},3934:(E,C,s)=>{"use strict";var r=s(61557).charAt,a=s(41433),c=s(91093),u=s(79077),e=s(28738),f="String Iterator",m=c.set,T=c.getterFor(f);u(String,"String",function(M){m(this,{type:f,string:a(M),index:0})},function(){var W,w=T(this),D=w.string,U=w.index;return U>=D.length?e(void 0,!0):(W=r(D,U),w.index+=W.length,e(W,!1))})},3588:(E,C,s)=>{"use strict";s(90513)({target:"String",proto:!0},{repeat:s(53411)})},24655:(E,C,s)=>{"use strict";var J,r=s(90513),a=s(64350),c=s(25525).f,u=s(48869),e=s(41433),f=s(56421),m=s(67917),T=s(79668),M=s(81124),w=a("".startsWith),D=a("".slice),U=Math.min,W=T("startsWith");r({target:"String",proto:!0,forced:!(!M&&!W&&(J=c(String.prototype,"startsWith"),J&&!J.writable)||W)},{startsWith:function(F){var X=e(m(this));f(F);var de=u(U(arguments.length>1?arguments[1]:void 0,X.length)),V=e(F);return w?w(X,V,de):D(X,de,de+V.length)===V}})},90451:(E,C,s)=>{"use strict";var r=s(90513),a=s(89858).trim;r({target:"String",proto:!0,forced:s(85462)("trim")},{trim:function(){return a(this)}})},16426:(E,C,s)=>{"use strict";s(25374)("asyncIterator")},17858:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(70009),c=s(25401),u=s(23634),e=s(81124),f=s(49642),m=s(98535),T=s(55756),M=s(80112),w=s(23336),D=s(64562),U=s(81010),W=s(62939),$=s(41433),J=s(51361),F=s(83272),X=s(28474),de=s(51518),V=s(62469),ce=s(47238),se=s(25525),fe=s(48011),Te=s(25913),$e=s(25558),ge=s(42915),Et=s(1707),ot=s(64579),ct=s(86066),qe=s(45599),He=s(13708),We=s(91840),Le=s(89734),Pt=s(25374),it=s(56992),Xt=s(85681),cn=s(91093),pn=s(68607).forEach,Rn=ct("hidden"),At="Symbol",qt="prototype",sn=cn.set,fn=cn.getterFor(At),xn=Object[qt],Kr=a.Symbol,Or=Kr&&Kr[qt],Lr=a.TypeError,ir=a.QObject,Qr=se.f,jr=fe.f,br=V.f,ht=$e.f,Wt=u([].push),Tt=ot("symbols"),wn=ot("op-symbols"),jn=ot("wks"),hr=!ir||!ir[qt]||!ir[qt].findChild,Oi=f&&T(function(){return 7!==F(jr({},"a",{get:function(){return jr(this,"a",{value:7}).a}})).a})?function(po,$i,qr){var Hi=Qr(xn,$i);Hi&&delete xn[$i],jr(po,$i,qr),Hi&&po!==xn&&jr(xn,$i,Hi)}:jr,Wi=function(po,$i){var qr=Tt[po]=F(Or);return sn(qr,{type:At,tag:po,description:$i}),f||(qr.description=$i),qr},so=function($i,qr,Hi){$i===xn&&so(wn,qr,Hi),D($i);var Dn=W(qr);return D(Hi),M(Tt,Dn)?(Hi.enumerable?(M($i,Rn)&&$i[Rn][Dn]&&($i[Rn][Dn]=!1),Hi=F(Hi,{enumerable:J(0,!1)})):(M($i,Rn)||jr($i,Rn,J(1,{})),$i[Rn][Dn]=!0),Oi($i,Dn,Hi)):jr($i,Dn,Hi)},kr=function($i,qr){D($i);var Hi=U(qr),Dn=X(Hi).concat(Eo(Hi));return pn(Dn,function(Hn){(!f||c(ii,Hi,Hn))&&so($i,Hn,Hi[Hn])}),$i},ii=function($i){var qr=W($i),Hi=c(ht,this,qr);return!(this===xn&&M(Tt,qr)&&!M(wn,qr))&&(!(Hi||!M(this,qr)||!M(Tt,qr)||M(this,Rn)&&this[Rn][qr])||Hi)},mr=function($i,qr){var Hi=U($i),Dn=W(qr);if(Hi!==xn||!M(Tt,Dn)||M(wn,Dn)){var Hn=Qr(Hi,Dn);return Hn&&M(Tt,Dn)&&!(M(Hi,Rn)&&Hi[Rn][Dn])&&(Hn.enumerable=!0),Hn}},pr=function($i){var qr=br(U($i)),Hi=[];return pn(qr,function(Dn){!M(Tt,Dn)&&!M(qe,Dn)&&Wt(Hi,Dn)}),Hi},Eo=function(po){var $i=po===xn,qr=br($i?wn:U(po)),Hi=[];return pn(qr,function(Dn){M(Tt,Dn)&&(!$i||M(xn,Dn))&&Wt(Hi,Tt[Dn])}),Hi};m||(ge(Or=(Kr=function(){if(w(Or,this))throw Lr("Symbol is not a constructor");var $i=arguments.length&&void 0!==arguments[0]?$(arguments[0]):void 0,qr=He($i),Hi=function(Dn){this===xn&&c(Hi,wn,Dn),M(this,Rn)&&M(this[Rn],qr)&&(this[Rn][qr]=!1),Oi(this,qr,J(1,Dn))};return f&&hr&&Oi(xn,qr,{configurable:!0,set:Hi}),Wi(qr,$i)})[qt],"toString",function(){return fn(this).tag}),ge(Kr,"withoutSetter",function(po){return Wi(He(po),po)}),$e.f=ii,fe.f=so,Te.f=kr,se.f=mr,de.f=V.f=pr,ce.f=Eo,Le.f=function(po){return Wi(We(po),po)},f&&(Et(Or,"description",{configurable:!0,get:function(){return fn(this).description}}),e||ge(xn,"propertyIsEnumerable",ii,{unsafe:!0}))),r({global:!0,constructor:!0,wrap:!0,forced:!m,sham:!m},{Symbol:Kr}),pn(X(jn),function(po){Pt(po)}),r({target:At,stat:!0,forced:!m},{useSetter:function(){hr=!0},useSimple:function(){hr=!1}}),r({target:"Object",stat:!0,forced:!m,sham:!f},{create:function($i,qr){return void 0===qr?F($i):kr(F($i),qr)},defineProperty:so,defineProperties:kr,getOwnPropertyDescriptor:mr}),r({target:"Object",stat:!0,forced:!m},{getOwnPropertyNames:pr}),it(),Xt(Kr,At),qe[Rn]=!0},1172:()=>{},12353:(E,C,s)=>{"use strict";var r=s(90513),a=s(7365),c=s(80112),u=s(41433),e=s(64579),f=s(56709),m=e("string-to-symbol-registry"),T=e("symbol-to-string-registry");r({target:"Symbol",stat:!0,forced:!f},{for:function(M){var w=u(M);if(c(m,w))return m[w];var D=a("Symbol")(w);return m[w]=D,T[D]=w,D}})},99579:(E,C,s)=>{"use strict";s(25374)("hasInstance")},41258:(E,C,s)=>{"use strict";s(25374)("isConcatSpreadable")},2383:(E,C,s)=>{"use strict";s(25374)("iterator")},56728:(E,C,s)=>{"use 
strict";s(17858),s(12353),s(27632),s(75071),s(37764)},27632:(E,C,s)=>{"use strict";var r=s(90513),a=s(80112),c=s(74717),u=s(7378),e=s(64579),f=s(56709),m=e("symbol-to-string-registry");r({target:"Symbol",stat:!0,forced:!f},{keyFor:function(M){if(!c(M))throw TypeError(u(M)+" is not a symbol");if(a(m,M))return m[M]}})},64776:(E,C,s)=>{"use strict";s(25374)("matchAll")},44339:(E,C,s)=>{"use strict";s(25374)("match")},88215:(E,C,s)=>{"use strict";s(25374)("replace")},65389:(E,C,s)=>{"use strict";s(25374)("search")},12733:(E,C,s)=>{"use strict";s(25374)("species")},97977:(E,C,s)=>{"use strict";s(25374)("split")},59792:(E,C,s)=>{"use strict";var r=s(25374),a=s(56992);r("toPrimitive"),a()},60242:(E,C,s)=>{"use strict";var r=s(7365),a=s(25374),c=s(85681);a("toStringTag"),c(r("Symbol"),"Symbol")},26291:(E,C,s)=>{"use strict";s(25374)("unscopables")},67670:(E,C,s)=>{"use strict";s(10901)},43548:(E,C,s)=>{"use strict";var r=s(91840),a=s(48011).f,c=r("metadata"),u=Function.prototype;void 0===u[c]&&a(u,c,{value:null})},10509:(E,C,s)=>{"use strict";var r=s(90513),a=s(64902),c=s(60077).remove;r({target:"Map",proto:!0,real:!0,forced:!0},{deleteAll:function(){for(var m,e=a(this),f=!0,T=0,M=arguments.length;T{"use strict";var r=s(90513),a=s(64902),c=s(60077),u=c.get,e=c.has,f=c.set;r({target:"Map",proto:!0,real:!0,forced:!0},{emplace:function(T,M){var D,U,w=a(this);return e(w,T)?(D=u(w,T),"update"in M&&(D=M.update(D,T,w),f(w,T,D)),D):(U=M.insert(T,w),f(w,T,U),U)}})},54547:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{every:function(f){var m=c(this),T=a(f,arguments.length>1?arguments[1]:void 0);return!1!==u(m,function(M,w){if(!T(M,w,m))return!1},!0)}})},68996:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(60077),e=s(21515),f=u.Map,m=u.set;r({target:"Map",proto:!0,real:!0,forced:!0},{filter:function(M){var w=c(this),D=a(M,arguments.length>1?arguments[1]:void 0),U=new f;return e(w,function(W,$){D(W,$,w)&&m(U,$,W)}),U}})},60176:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{findKey:function(f){var m=c(this),T=a(f,arguments.length>1?arguments[1]:void 0),M=u(m,function(w,D){if(T(w,D,m))return{key:D}},!0);return M&&M.key}})},1530:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{find:function(f){var m=c(this),T=a(f,arguments.length>1?arguments[1]:void 0),M=u(m,function(w,D){if(T(w,D,m))return{value:w}},!0);return M&&M.value}})},78271:(E,C,s)=>{"use strict";s(90513)({target:"Map",stat:!0,forced:!0},{from:s(83483)})},41554:(E,C,s)=>{"use strict";var r=s(90513),a=s(23634),c=s(61812),u=s(67917),e=s(41605),f=s(60077),m=s(81124),T=f.Map,M=f.has,w=f.get,D=f.set,U=a([].push);r({target:"Map",stat:!0,forced:m},{groupBy:function($,J){u($),c(J);var F=new T,X=0;return e($,function(de){var V=J(de,X++);M(F,V)?U(w(F,V),de):D(F,V,[de])}),F}})},41688:(E,C,s)=>{"use strict";var r=s(90513),a=s(29627),c=s(64902),u=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{includes:function(f){return!0===u(c(this),function(m){if(a(m,f))return!0},!0)}})},92847:(E,C,s)=>{"use strict";var r=s(90513),a=s(25401),c=s(41605),u=s(52208),e=s(61812),f=s(60077).Map;r({target:"Map",stat:!0,forced:!0},{keyBy:function(T,M){var D=new(u(this)?this:f);e(M);var U=e(D.set);return c(T,function(W){a(U,D,M(W),W)}),D}})},17316:(E,C,s)=>{"use strict";var 
r=s(90513),a=s(64902),c=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{keyOf:function(e){var f=c(a(this),function(m,T){if(m===e)return{key:T}},!0);return f&&f.key}})},58786:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(60077),e=s(21515),f=u.Map,m=u.set;r({target:"Map",proto:!0,real:!0,forced:!0},{mapKeys:function(M){var w=c(this),D=a(M,arguments.length>1?arguments[1]:void 0),U=new f;return e(w,function(W,$){m(U,D(W,$,w),W)}),U}})},51943:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(60077),e=s(21515),f=u.Map,m=u.set;r({target:"Map",proto:!0,real:!0,forced:!0},{mapValues:function(M){var w=c(this),D=a(M,arguments.length>1?arguments[1]:void 0),U=new f;return e(w,function(W,$){m(U,$,D(W,$,w))}),U}})},12783:(E,C,s)=>{"use strict";var r=s(90513),a=s(64902),c=s(41605),u=s(60077).set;r({target:"Map",proto:!0,real:!0,arity:1,forced:!0},{merge:function(f){for(var m=a(this),T=arguments.length,M=0;M{"use strict";s(90513)({target:"Map",stat:!0,forced:!0},{of:s(13067)})},69773:(E,C,s)=>{"use strict";var r=s(90513),a=s(61812),c=s(64902),u=s(21515),e=TypeError;r({target:"Map",proto:!0,real:!0,forced:!0},{reduce:function(m){var T=c(this),M=arguments.length<2,w=M?void 0:arguments[1];if(a(m),u(T,function(D,U){M?(M=!1,w=D):w=m(w,D,U,T)}),M)throw e("Reduce of empty map with no initial value");return w}})},22337:(E,C,s)=>{"use strict";var r=s(90513),a=s(76781),c=s(64902),u=s(21515);r({target:"Map",proto:!0,real:!0,forced:!0},{some:function(f){var m=c(this),T=a(f,arguments.length>1?arguments[1]:void 0);return!0===u(m,function(M,w){if(T(M,w,m))return!0},!0)}})},84131:(E,C,s)=>{"use strict";s(90513)({target:"Map",proto:!0,real:!0,name:"upsert",forced:!0},{updateOrInsert:s(57729)})},40199:(E,C,s)=>{"use strict";var r=s(90513),a=s(61812),c=s(64902),u=s(60077),e=TypeError,f=u.get,m=u.has,T=u.set;r({target:"Map",proto:!0,real:!0,forced:!0},{update:function(w,D){var U=c(this),W=arguments.length;a(D);var $=m(U,w);if(!$&&W<3)throw e("Updating absent value");var J=$?f(U,w):a(W>2?arguments[2]:void 0)(w,U);return T(U,w,D(J,w,U)),U}})},69046:(E,C,s)=>{"use strict";s(90513)({target:"Map",proto:!0,real:!0,forced:!0},{upsert:s(57729)})},61127:(E,C,s)=>{"use strict";s(84798)},45975:(E,C,s)=>{"use strict";s(98857)},93114:(E,C,s)=>{"use strict";var r=s(90513),a=s(54256),c=s(26975);r({target:"Promise",stat:!0,forced:!0},{try:function(u){var e=a.f(this),f=c(u);return(f.error?e.reject:e.resolve)(f.value),e.promise}})},68333:(E,C,s)=>{"use strict";var r=s(90513),a=s(54256);r({target:"Promise",stat:!0},{withResolvers:function(){var u=a.f(this);return{promise:u.promise,resolve:u.resolve,reject:u.reject}}})},55461:(E,C,s)=>{"use strict";s(25374)("asyncDispose")},5737:(E,C,s)=>{"use strict";s(25374)("dispose")},70337:(E,C,s)=>{"use strict";s(90513)({target:"Symbol",stat:!0},{isRegisteredSymbol:s(86475)})},61652:(E,C,s)=>{"use strict";s(90513)({target:"Symbol",stat:!0,name:"isRegisteredSymbol"},{isRegistered:s(86475)})},44388:(E,C,s)=>{"use strict";s(90513)({target:"Symbol",stat:!0,forced:!0},{isWellKnownSymbol:s(74110)})},90791:(E,C,s)=>{"use strict";s(90513)({target:"Symbol",stat:!0,name:"isWellKnownSymbol",forced:!0},{isWellKnown:s(74110)})},87097:(E,C,s)=>{"use strict";s(25374)("matcher")},29559:(E,C,s)=>{"use strict";s(25374)("metadataKey")},71985:(E,C,s)=>{"use strict";s(25374)("metadata")},90212:(E,C,s)=>{"use strict";s(25374)("observable")},93770:(E,C,s)=>{"use strict";s(25374)("patternMatch")},47743:(E,C,s)=>{"use strict";s(25374)("replaceAll")},33089:(E,C,s)=>{"use strict";s(1285);var 
r=s(44125),a=s(70009),c=s(35329),u=s(65162),e=s(84394),m=s(91840)("toStringTag");for(var T in r){var M=a[T],w=M&&M.prototype;w&&c(w)!==m&&u(w,m,T),e[T]=e.Array}},94784:(E,C,s)=>{"use strict";var r=s(90513),a=s(70009),u=s(53814)(a.setInterval,!0);r({global:!0,bind:!0,forced:a.setInterval!==u},{setInterval:u})},36445:(E,C,s)=>{"use strict";var r=s(90513),a=s(70009),u=s(53814)(a.setTimeout,!0);r({global:!0,bind:!0,forced:a.setTimeout!==u},{setTimeout:u})},69280:(E,C,s)=>{"use strict";s(94784),s(36445)},73842:(E,C,s)=>{"use strict";s(1285);var r=s(90513),a=s(70009),c=s(25401),u=s(23634),e=s(49642),f=s(54933),m=s(42915),T=s(1707),M=s(84604),w=s(85681),D=s(14554),U=s(91093),W=s(54849),$=s(52208),J=s(80112),F=s(76781),X=s(35329),de=s(64562),V=s(77293),ce=s(41433),se=s(83272),fe=s(51361),Te=s(88055),$e=s(34014),ge=s(15086),Et=s(91840),ot=s(84865),ct=Et("iterator"),qe="URLSearchParams",He=qe+"Iterator",We=U.set,Le=U.getterFor(qe),Pt=U.getterFor(He),it=Object.getOwnPropertyDescriptor,Xt=function(Dn){if(!e)return a[Dn];var Hn=it(a,Dn);return Hn&&Hn.value},cn=Xt("fetch"),pn=Xt("Request"),Rn=Xt("Headers"),At=pn&&pn.prototype,qt=Rn&&Rn.prototype,sn=a.RegExp,fn=a.TypeError,xn=a.decodeURIComponent,Kr=a.encodeURIComponent,Or=u("".charAt),Lr=u([].join),ir=u([].push),Qr=u("".replace),jr=u([].shift),br=u([].splice),ht=u("".split),Wt=u("".slice),Tt=/\+/g,wn=Array(4),jn=function(Dn){return wn[Dn-1]||(wn[Dn-1]=sn("((?:%[\\da-f]{2}){"+Dn+"})","gi"))},hr=function(Dn){try{return xn(Dn)}catch{return Dn}},Oi=function(Dn){var Hn=Qr(Dn,Tt," "),jt=4;try{return xn(Hn)}catch{for(;jt;)Hn=Qr(Hn,jn(jt--),hr);return Hn}},Wi=/[!'()~]|%20/g,so={"!":"%21","'":"%27","(":"%28",")":"%29","~":"%7E","%20":"+"},kr=function(Dn){return so[Dn]},Ei=function(Dn){return Qr(Kr(Dn),Wi,kr)},ii=D(function(Hn,jt){We(this,{type:He,iterator:Te(Le(Hn).entries),kind:jt})},"Iterator",function(){var Hn=Pt(this),jt=Hn.kind,Fe=Hn.iterator.next(),Ie=Fe.value;return Fe.done||(Fe.value="keys"===jt?Ie.key:"values"===jt?Ie.value:[Ie.key,Ie.value]),Fe},!0),mr=function(Dn){this.entries=[],this.url=null,void 0!==Dn&&(V(Dn)?this.parseObject(Dn):this.parseQuery("string"==typeof Dn?"?"===Or(Dn,0)?Wt(Dn,1):Dn:ce(Dn)))};mr.prototype={type:qe,bindURL:function(Dn){this.url=Dn,this.update()},parseObject:function(Dn){var jt,Fe,Ie,et,ze,an,lt,Hn=$e(Dn);if(Hn)for(Fe=(jt=Te(Dn,Hn)).next;!(Ie=c(Fe,jt)).done;){if(et=Te(de(Ie.value)),(an=c(ze=et.next,et)).done||(lt=c(ze,et)).done||!c(ze,et).done)throw fn("Expected sequence with length 2");ir(this.entries,{key:ce(an.value),value:ce(lt.value)})}else for(var Rt in Dn)J(Dn,Rt)&&ir(this.entries,{key:Rt,value:ce(Dn[Rt])})},parseQuery:function(Dn){if(Dn)for(var Fe,Ie,Hn=ht(Dn,"&"),jt=0;jt0?arguments[0]:void 0));e||(this.size=jt.entries.length)},Eo=pr.prototype;if(M(Eo,{append:function(Hn,jt){var Fe=Le(this);ge(arguments.length,2),ir(Fe.entries,{key:ce(Hn),value:ce(jt)}),e||this.length++,Fe.updateURL()},delete:function(Dn){for(var Hn=Le(this),jt=ge(arguments.length,1),Fe=Hn.entries,Ie=ce(Dn),et=jt<2?void 0:arguments[1],ze=void 0===et?et:ce(et),an=0;anFe.key?1:-1}),Hn.updateURL()},forEach:function(Hn){for(var et,jt=Le(this).entries,Fe=F(Hn,arguments.length>1?arguments[1]:void 0),Ie=0;Ie1?qr(arguments[1]):{})}}),$(pn)){var Hi=function(Hn){return W(this,At),new pn(Hn,arguments.length>1?qr(arguments[1]):{})};At.constructor=Hi,Hi.prototype=At,r({global:!0,constructor:!0,dontCallGetSet:!0,forced:!0},{Request:Hi})}}E.exports={URLSearchParams:pr,getState:Le}},56247:()=>{},82842:()=>{},26953:(E,C,s)=>{"use 
strict";s(73842)},86023:()=>{},37256:(E,C,s)=>{"use strict";var r=s(90513),a=s(7365),c=s(55756),u=s(15086),e=s(41433),f=s(54933),m=a("URL");r({target:"URL",stat:!0,forced:!(f&&c(function(){m.canParse()}))},{canParse:function(w){var D=u(arguments.length,1),U=e(w),W=D<2||void 0===arguments[1]?void 0:e(arguments[1]);try{return!!new m(U,W)}catch{return!1}}})},80504:(E,C,s)=>{"use strict";s(3934);var Wi,r=s(90513),a=s(49642),c=s(54933),u=s(70009),e=s(76781),f=s(23634),m=s(42915),T=s(1707),M=s(54849),w=s(80112),D=s(75791),U=s(51923),W=s(8681),$=s(61557).codeAt,J=s(26662),F=s(41433),X=s(85681),de=s(15086),V=s(73842),ce=s(91093),se=ce.set,fe=ce.getterFor("URL"),Te=V.URLSearchParams,$e=V.getState,ge=u.URL,Et=u.TypeError,ot=u.parseInt,ct=Math.floor,qe=Math.pow,He=f("".charAt),We=f(/./.exec),Le=f([].join),Pt=f(1..toString),it=f([].pop),Xt=f([].push),cn=f("".replace),pn=f([].shift),Rn=f("".split),At=f("".slice),qt=f("".toLowerCase),sn=f([].unshift),xn="Invalid scheme",Kr="Invalid host",Or="Invalid port",Lr=/[a-z]/i,ir=/[\d+-.a-z]/i,Qr=/\d/,jr=/^0x/i,br=/^[0-7]+$/,ht=/^\d+$/,Wt=/^[\da-f]+$/i,Tt=/[\0\t\n\r #%/:<>?@[\\\]^|]/,wn=/[\0\t\n\r #/:<>?@[\\\]^|]/,jn=/^[\u0000-\u0020]+/,hr=/(^|[^\u0000-\u0020])[\u0000-\u0020]+$/,Oi=/[\t\n\r]/g,ii=function(dr){var Ni,ti,Vr,wi;if("number"==typeof dr){for(Ni=[],ti=0;ti<4;ti++)sn(Ni,dr%256),dr=ct(dr/256);return Le(Ni,".")}if("object"==typeof dr){for(Ni="",Vr=function(dr){for(var Ni=null,ti=1,Vr=null,wi=0,ji=0;ji<8;ji++)0!==dr[ji]?(wi>ti&&(Ni=Vr,ti=wi),Vr=null,wi=0):(null===Vr&&(Vr=ji),++wi);return wi>ti&&(Ni=Vr,ti=wi),Ni}(dr),ti=0;ti<8;ti++)wi&&0===dr[ti]||(wi&&(wi=!1),Vr===ti?(Ni+=ti?":":"::",wi=!0):(Ni+=Pt(dr[ti],16),ti<7&&(Ni+=":")));return"["+Ni+"]"}return dr},mr={},pr=D({},mr,{" ":1,'"':1,"<":1,">":1,"`":1}),Eo=D({},pr,{"#":1,"?":1,"{":1,"}":1}),po=D({},Eo,{"/":1,":":1,";":1,"=":1,"@":1,"[":1,"\\":1,"]":1,"^":1,"|":1}),$i=function(dr,Ni){var ti=$(dr,0);return ti>32&&ti<127&&!w(Ni,dr)?dr:encodeURIComponent(dr)},qr={ftp:21,file:null,http:80,https:443,ws:80,wss:443},Hi=function(dr,Ni){var ti;return 2===dr.length&&We(Lr,He(dr,0))&&(":"===(ti=He(dr,1))||!Ni&&"|"===ti)},Dn=function(dr){var Ni;return dr.length>1&&Hi(At(dr,0,2))&&(2===dr.length||"/"===(Ni=He(dr,2))||"\\"===Ni||"?"===Ni||"#"===Ni)},Hn=function(dr){return"."===dr||"%2e"===qt(dr)},jt=function(dr){return".."===(dr=qt(dr))||"%2e."===dr||".%2e"===dr||"%2e%2e"===dr},Fe={},Ie={},et={},ze={},an={},lt={},Rt={},Pe={},qn={},gr={},Pn={},_r={},Pr={},tr={},Zn={},nr={},Zt={},dn={},Ge={},Ot={},mn={},wr=function(dr,Ni,ti){var wi,ji,Vi,Vr=F(dr);if(Ni){if(ji=this.parse(Vr))throw Et(ji);this.searchParams=null}else{if(void 0!==ti&&(wi=new wr(ti,!0)),ji=this.parse(Vr,null,wi))throw Et(ji);(Vi=$e(new Te)).bindURL(this),this.searchParams=Vi}};wr.prototype={type:"URL",parse:function(dr,Ni,ti){var ro,Vt,bn,Bn,Vr=this,wi=Ni||Fe,ji=0,Vi="",Po=!1,ko=!1,Ir=!1;for(dr=F(dr),Ni||(Vr.scheme="",Vr.username="",Vr.password="",Vr.host=null,Vr.port=null,Vr.path=[],Vr.query=null,Vr.fragment=null,Vr.cannotBeABaseURL=!1,dr=cn(dr,jn,""),dr=cn(dr,hr,"$1")),dr=cn(dr,Oi,""),ro=U(dr);ji<=ro.length;){switch(Vt=ro[ji],wi){case Fe:if(!Vt||!We(Lr,Vt)){if(Ni)return xn;wi=et;continue}Vi+=qt(Vt),wi=Ie;break;case Ie:if(Vt&&(We(ir,Vt)||"+"===Vt||"-"===Vt||"."===Vt))Vi+=qt(Vt);else{if(":"!==Vt){if(Ni)return xn;Vi="",wi=et,ji=0;continue}if(Ni&&(Vr.isSpecial()!==w(qr,Vi)||"file"===Vi&&(Vr.includesCredentials()||null!==Vr.port)||"file"===Vr.scheme&&!Vr.host))return;if(Vr.scheme=Vi,Ni)return 
void(Vr.isSpecial()&&qr[Vr.scheme]===Vr.port&&(Vr.port=null));Vi="","file"===Vr.scheme?wi=tr:Vr.isSpecial()&&ti&&ti.scheme===Vr.scheme?wi=ze:Vr.isSpecial()?wi=Pe:"/"===ro[ji+1]?(wi=an,ji++):(Vr.cannotBeABaseURL=!0,Xt(Vr.path,""),wi=Ge)}break;case et:if(!ti||ti.cannotBeABaseURL&&"#"!==Vt)return xn;if(ti.cannotBeABaseURL&&"#"===Vt){Vr.scheme=ti.scheme,Vr.path=W(ti.path),Vr.query=ti.query,Vr.fragment="",Vr.cannotBeABaseURL=!0,wi=mn;break}wi="file"===ti.scheme?tr:lt;continue;case ze:if("/"!==Vt||"/"!==ro[ji+1]){wi=lt;continue}wi=qn,ji++;break;case an:if("/"===Vt){wi=gr;break}wi=dn;continue;case lt:if(Vr.scheme=ti.scheme,Vt===Wi)Vr.username=ti.username,Vr.password=ti.password,Vr.host=ti.host,Vr.port=ti.port,Vr.path=W(ti.path),Vr.query=ti.query;else if("/"===Vt||"\\"===Vt&&Vr.isSpecial())wi=Rt;else if("?"===Vt)Vr.username=ti.username,Vr.password=ti.password,Vr.host=ti.host,Vr.port=ti.port,Vr.path=W(ti.path),Vr.query="",wi=Ot;else{if("#"!==Vt){Vr.username=ti.username,Vr.password=ti.password,Vr.host=ti.host,Vr.port=ti.port,Vr.path=W(ti.path),Vr.path.length--,wi=dn;continue}Vr.username=ti.username,Vr.password=ti.password,Vr.host=ti.host,Vr.port=ti.port,Vr.path=W(ti.path),Vr.query=ti.query,Vr.fragment="",wi=mn}break;case Rt:if(!Vr.isSpecial()||"/"!==Vt&&"\\"!==Vt){if("/"!==Vt){Vr.username=ti.username,Vr.password=ti.password,Vr.host=ti.host,Vr.port=ti.port,wi=dn;continue}wi=gr}else wi=qn;break;case Pe:if(wi=qn,"/"!==Vt||"/"!==He(Vi,ji+1))continue;ji++;break;case qn:if("/"!==Vt&&"\\"!==Vt){wi=gr;continue}break;case gr:if("@"===Vt){Po&&(Vi="%40"+Vi),Po=!0,bn=U(Vi);for(var ci=0;ci65535)return Or;Vr.port=Vr.isSpecial()&&es===qr[Vr.scheme]?null:es,Vi=""}if(Ni)return;wi=Zt;continue}return Or}Vi+=Vt;break;case tr:if(Vr.scheme="file","/"===Vt||"\\"===Vt)wi=Zn;else{if(!ti||"file"!==ti.scheme){wi=dn;continue}switch(Vt){case Wi:Vr.host=ti.host,Vr.path=W(ti.path),Vr.query=ti.query;break;case"?":Vr.host=ti.host,Vr.path=W(ti.path),Vr.query="",wi=Ot;break;case"#":Vr.host=ti.host,Vr.path=W(ti.path),Vr.query=ti.query,Vr.fragment="",wi=mn;break;default:Dn(Le(W(ro,ji),""))||(Vr.host=ti.host,Vr.path=W(ti.path),Vr.shortenPath()),wi=dn;continue}}break;case Zn:if("/"===Vt||"\\"===Vt){wi=nr;break}ti&&"file"===ti.scheme&&!Dn(Le(W(ro,ji),""))&&(Hi(ti.path[0],!0)?Xt(Vr.path,ti.path[0]):Vr.host=ti.host),wi=dn;continue;case nr:if(Vt===Wi||"/"===Vt||"\\"===Vt||"?"===Vt||"#"===Vt){if(!Ni&&Hi(Vi))wi=dn;else if(""===Vi){if(Vr.host="",Ni)return;wi=Zt}else{if(Bn=Vr.parseHost(Vi))return Bn;if("localhost"===Vr.host&&(Vr.host=""),Ni)return;Vi="",wi=Zt}continue}Vi+=Vt;break;case Zt:if(Vr.isSpecial()){if(wi=dn,"/"!==Vt&&"\\"!==Vt)continue}else if(Ni||"?"!==Vt)if(Ni||"#"!==Vt){if(Vt!==Wi&&(wi=dn,"/"!==Vt))continue}else Vr.fragment="",wi=mn;else Vr.query="",wi=Ot;break;case dn:if(Vt===Wi||"/"===Vt||"\\"===Vt&&Vr.isSpecial()||!Ni&&("?"===Vt||"#"===Vt)){if(jt(Vi)?(Vr.shortenPath(),"/"!==Vt&&!("\\"===Vt&&Vr.isSpecial())&&Xt(Vr.path,"")):Hn(Vi)?"/"!==Vt&&!("\\"===Vt&&Vr.isSpecial())&&Xt(Vr.path,""):("file"===Vr.scheme&&!Vr.path.length&&Hi(Vi)&&(Vr.host&&(Vr.host=""),Vi=He(Vi,0)+":"),Xt(Vr.path,Vi)),Vi="","file"===Vr.scheme&&(Vt===Wi||"?"===Vt||"#"===Vt))for(;Vr.path.length>1&&""===Vr.path[0];)pn(Vr.path);"?"===Vt?(Vr.query="",wi=Ot):"#"===Vt&&(Vr.fragment="",wi=mn)}else Vi+=$i(Vt,Eo);break;case Ge:"?"===Vt?(Vr.query="",wi=Ot):"#"===Vt?(Vr.fragment="",wi=mn):Vt!==Wi&&(Vr.path[0]+=$i(Vt,mr));break;case Ot:Ni||"#"!==Vt?Vt!==Wi&&("'"===Vt&&Vr.isSpecial()?Vr.query+="%27":Vr.query+="#"===Vt?"%23":$i(Vt,mr)):(Vr.fragment="",wi=mn);break;case 
mn:Vt!==Wi&&(Vr.fragment+=$i(Vt,pr))}ji++}},parseHost:function(dr){var Ni,ti,Vr;if("["===He(dr,0)){if("]"!==He(dr,dr.length-1)||(Ni=function(dr){var ji,Vi,Po,ko,Ir,ro,Vt,Ni=[0,0,0,0,0,0,0,0],ti=0,Vr=null,wi=0,bn=function(){return He(dr,wi)};if(":"===bn()){if(":"!==He(dr,1))return;wi+=2,Vr=++ti}for(;bn();){if(8===ti)return;if(":"!==bn()){for(ji=Vi=0;Vi<4&&We(Wt,bn());)ji=16*ji+ot(bn(),16),wi++,Vi++;if("."===bn()){if(0===Vi||(wi-=Vi,ti>6))return;for(Po=0;bn();){if(ko=null,Po>0){if(!("."===bn()&&Po<4))return;wi++}if(!We(Qr,bn()))return;for(;We(Qr,bn());){if(Ir=ot(bn(),10),null===ko)ko=Ir;else{if(0===ko)return;ko=10*ko+Ir}if(ko>255)return;wi++}Ni[ti]=256*Ni[ti]+ko,(2==++Po||4===Po)&&ti++}if(4!==Po)return;break}if(":"===bn()){if(wi++,!bn())return}else if(bn())return;Ni[ti++]=ji}else{if(null!==Vr)return;wi++,Vr=++ti}}if(null!==Vr)for(ro=ti-Vr,ti=7;0!==ti&&ro>0;)Vt=Ni[ti],Ni[ti--]=Ni[Vr+ro-1],Ni[Vr+--ro]=Vt;else if(8!==ti)return;return Ni}(At(dr,1,-1)),!Ni))return Kr;this.host=Ni}else if(this.isSpecial()){if(dr=J(dr),We(Tt,dr)||(Ni=function(dr){var ti,Vr,wi,ji,Vi,Po,ko,Ni=Rn(dr,".");if(Ni.length&&""===Ni[Ni.length-1]&&Ni.length--,(ti=Ni.length)>4)return dr;for(Vr=[],wi=0;wi1&&"0"===He(ji,0)&&(Vi=We(jr,ji)?16:8,ji=At(ji,8===Vi?1:2)),""===ji)Po=0;else{if(!We(10===Vi?ht:8===Vi?br:Wt,ji))return dr;Po=ot(ji,Vi)}Xt(Vr,Po)}for(wi=0;wi=qe(256,5-ti))return null}else if(Po>255)return null;for(ko=it(Vr),wi=0;wi1?arguments[1]:void 0,wi=se(ti,new wr(Ni,!1,Vr));a||(ti.href=wi.serialize(),ti.origin=wi.getOrigin(),ti.protocol=wi.getProtocol(),ti.username=wi.getUsername(),ti.password=wi.getPassword(),ti.host=wi.getHost(),ti.hostname=wi.getHostname(),ti.port=wi.getPort(),ti.pathname=wi.getPathname(),ti.search=wi.getSearch(),ti.searchParams=wi.getSearchParams(),ti.hash=wi.getHash())},Ci=Ti.prototype,Ai=function(dr,Ni){return{get:function(){return fe(this)[dr]()},set:Ni&&function(ti){return fe(this)[Ni](ti)},configurable:!0,enumerable:!0}};if(a&&(T(Ci,"href",Ai("serialize","setHref")),T(Ci,"origin",Ai("getOrigin")),T(Ci,"protocol",Ai("getProtocol","setProtocol")),T(Ci,"username",Ai("getUsername","setUsername")),T(Ci,"password",Ai("getPassword","setPassword")),T(Ci,"host",Ai("getHost","setHost")),T(Ci,"hostname",Ai("getHostname","setHostname")),T(Ci,"port",Ai("getPort","setPort")),T(Ci,"pathname",Ai("getPathname","setPathname")),T(Ci,"search",Ai("getSearch","setSearch")),T(Ci,"searchParams",Ai("getSearchParams")),T(Ci,"hash",Ai("getHash","setHash"))),m(Ci,"toJSON",function(){return fe(this).serialize()},{enumerable:!0}),m(Ci,"toString",function(){return fe(this).serialize()},{enumerable:!0}),ge){var Ko=ge.createObjectURL,_s=ge.revokeObjectURL;Ko&&m(Ti,"createObjectURL",e(Ko,ge)),_s&&m(Ti,"revokeObjectURL",e(_s,ge))}X(Ti,"URL"),r({global:!0,constructor:!0,forced:!c,sham:!a},{URL:Ti})},95981:(E,C,s)=>{"use strict";s(80504)},71324:()=>{},75242:(E,C,s)=>{"use strict";var r=s(74771);E.exports=r},10323:(E,C,s)=>{"use strict";var r=s(8412);E.exports=r},99940:(E,C,s)=>{"use strict";var r=s(399);E.exports=r},89919:(E,C,s)=>{"use strict";var r=s(98812);E.exports=r},14869:(E,C,s)=>{"use strict";var r=s(33195);E.exports=r},4475:(E,C,s)=>{"use strict";var r=s(46332);E.exports=r},38762:(E,C,s)=>{"use strict";var r=s(42618);E.exports=r},8748:(E,C,s)=>{"use strict";var r=s(63791);s(33089),E.exports=r},71873:(E,C,s)=>{"use strict";var r=s(69029);E.exports=r},61599:(E,C,s)=>{"use strict";var r=s(28924);E.exports=r},34097:(E,C,s)=>{"use strict";s(33089);var 
r=s(35329),a=s(80112),c=s(23336),u=s(99940),e=Array.prototype,f={DOMTokenList:!0,NodeList:!0};E.exports=function(m){var T=m.entries;return m===e||c(e,m)&&T===e.entries||a(f,r(m))?u:T}},15149:(E,C,s)=>{"use strict";var r=s(98709);E.exports=r},83361:(E,C,s)=>{"use strict";var r=s(65991);E.exports=r},19095:(E,C,s)=>{"use strict";var r=s(64158);E.exports=r},71420:(E,C,s)=>{"use strict";var r=s(91799);E.exports=r},13178:(E,C,s)=>{"use strict";var r=s(26155);E.exports=r},52049:(E,C,s)=>{"use strict";s(33089);var r=s(35329),a=s(80112),c=s(23336),u=s(89919),e=Array.prototype,f={DOMTokenList:!0,NodeList:!0};E.exports=function(m){var T=m.forEach;return m===e||c(e,m)&&T===e.forEach||a(f,r(m))?u:T}},83655:(E,C,s)=>{"use strict";var r=s(33758);E.exports=r},87054:(E,C,s)=>{"use strict";var r=s(7592);E.exports=r},51946:(E,C,s)=>{"use strict";s(33089);var r=s(35329),a=s(80112),c=s(23336),u=s(14869),e=Array.prototype,f={DOMTokenList:!0,NodeList:!0};E.exports=function(m){var T=m.keys;return m===e||c(e,m)&&T===e.keys||a(f,r(m))?u:T}},40764:(E,C,s)=>{"use strict";var r=s(17480);E.exports=r},81214:(E,C,s)=>{"use strict";var r=s(20681);E.exports=r},30252:(E,C,s)=>{"use strict";var r=s(801);E.exports=r},50881:(E,C,s)=>{"use strict";var r=s(90949);E.exports=r},38813:(E,C,s)=>{"use strict";var r=s(99316);E.exports=r},45284:(E,C,s)=>{"use strict";var r=s(62212);E.exports=r},70157:(E,C,s)=>{"use strict";var r=s(49073);E.exports=r},3502:(E,C,s)=>{"use strict";var r=s(24146);E.exports=r},81610:(E,C,s)=>{"use strict";var r=s(40104);E.exports=r},19543:(E,C,s)=>{"use strict";var r=s(3555);E.exports=r},74046:(E,C,s)=>{"use strict";var r=s(42475);E.exports=r},13731:(E,C,s)=>{"use strict";var r=s(65786);E.exports=r},80129:(E,C,s)=>{"use strict";s(33089);var r=s(35329),a=s(80112),c=s(23336),u=s(4475),e=Array.prototype,f={DOMTokenList:!0,NodeList:!0};E.exports=function(m){var T=m.values;return m===e||c(e,m)&&T===e.values||a(f,r(m))?u:T}},43720:(E,C,s)=>{"use strict";var r=s(66306);E.exports=r},640:(E,C,s)=>{"use strict";var r=s(31845);s(33089),E.exports=r},50320:(E,C,s)=>{"use strict";var r=s(44168);E.exports=r},93006:(E,C,s)=>{"use strict";var r=s(25852);E.exports=r},36226:(E,C,s)=>{"use strict";var r=s(24457);E.exports=r},21968:(E,C,s)=>{"use strict";var r=s(99671);E.exports=r},87259:(E,C,s)=>{"use strict";var r=s(38007);E.exports=r},62021:(E,C,s)=>{"use strict";var r=s(57432);E.exports=r},57682:(E,C,s)=>{"use strict";var r=s(36541);E.exports=r},94222:(E,C,s)=>{"use strict";var r=s(17303);E.exports=r},1162:(E,C,s)=>{"use strict";var r=s(62149);E.exports=r},82805:(E,C,s)=>{"use strict";var r=s(86537);E.exports=r},70809:(E,C,s)=>{"use strict";var r=s(79553);E.exports=r},26498:(E,C,s)=>{"use strict";var r=s(80092);s(33089),E.exports=r},44850:(E,C,s)=>{"use strict";var r=s(472);E.exports=r},9634:(E,C,s)=>{"use strict";var r=s(4678);E.exports=r},12118:(E,C,s)=>{"use strict";s(69280);var r=s(13544);E.exports=r.setTimeout},96551:(E,C,s)=>{"use strict";var r=s(61697);s(33089),E.exports=r},98908:(E,C,s)=>{"use strict";var r=s(42497);s(33089),E.exports=r},55434:(E,C,s)=>{"use strict";var r=s(50681);E.exports=r},70906:(E,C,s)=>{"use strict";var r=s(75081);E.exports=r},41530:(E,C,s)=>{"use strict";s(26953),s(56247),s(82842),s(86023);var r=s(13544);E.exports=r.URLSearchParams},75081:(E,C,s)=>{"use strict";s(41530),s(95981),s(37256),s(71324);var r=s(13544);E.exports=r.URL},52243:function(E){var C;C=typeof global<"u"?global:this,E.exports=function(C){if(C.CSS&&C.CSS.escape)return C.CSS.escape;var s=function(r){if(0==arguments.length)throw 
new TypeError("`CSS.escape` requires an argument.");for(var e,a=String(r),c=a.length,u=-1,f="",m=a.charCodeAt(0);++u=1&&e<=31||127==e||0==u&&e>=48&&e<=57||1==u&&e>=48&&e<=57&&45==m?"\\"+e.toString(16)+" ":0==u&&1==c&&45==e||!(e>=128||45==e||95==e||e>=48&&e<=57||e>=65&&e<=90||e>=97&&e<=122)?"\\"+a.charAt(u):a.charAt(u):f+="\ufffd";return f};return C.CSS||(C.CSS={}),C.CSS.escape=s,s}(C)},97057:(E,C,s)=>{"use strict";s.d(C,{qY:()=>U});var r=function(){for(var V=0,ce=0,se=arguments.length;ce"u"&&typeof navigator<"u"&&"ReactNative"===navigator.product?new f:typeof navigator<"u"?J(navigator.userAgent):function X(){return typeof process<"u"&&process.version?new c(process.version.slice(1)):null}()}function J(V){var ce=function W(V){return""!==V&&w.reduce(function(ce,se){var fe=se[0];if(ce)return ce;var $e=se[1].exec(V);return!!$e&&[fe,$e]},!1)}(V);if(!ce)return null;var se=ce[0],fe=ce[1];if("searchbot"===se)return new e;var Te=fe[1]&&fe[1].split(/[._]/).slice(0,3);Te?Te.length1?Wt-1:0),wn=1;wn/gm),At=f(/^data-[\-\w.\u00B7-\uFFFF]/),qt=f(/^aria-[\-\w]+$/),sn=f(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),fn=f(/^(?:\w+script|data):/i),xn=f(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),Kr="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(br){return typeof br}:function(br){return br&&"function"==typeof Symbol&&br.constructor===Symbol&&br!==Symbol.prototype?"symbol":typeof br};function Or(br){if(Array.isArray(br)){for(var ht=0,Wt=Array(br.length);ht"u"?null:window},ir=function(ht,Wt){if("object"!==(typeof ht>"u"?"undefined":Kr(ht))||"function"!=typeof ht.createPolicy)return null;var Tt=null,wn="data-tt-policy-suffix";Wt.currentScript&&Wt.currentScript.hasAttribute(wn)&&(Tt=Wt.currentScript.getAttribute(wn));var jn="dompurify"+(Tt?"#"+Tt:"");try{return ht.createPolicy(jn,{createHTML:function(Oi){return Oi}})}catch{return console.warn("TrustedTypes policy "+jn+" could not be created."),null}};return function Qr(){var br=arguments.length>0&&void 0!==arguments[0]?arguments[0]:Lr(),ht=function(zr){return Qr(zr)};if(ht.version="2.3.3",ht.removed=[],!br||!br.document||9!==br.document.nodeType)return ht.isSupported=!1,ht;var Wt=br.document,Tt=br.document,wn=br.DocumentFragment,jn=br.HTMLTemplateElement,hr=br.Node,Oi=br.Element,Wi=br.NodeFilter,so=br.NamedNodeMap,kr=void 0===so?br.NamedNodeMap||br.MozNamedAttrMap:so,Ei=br.Text,ii=br.Comment,mr=br.DOMParser,pr=br.trustedTypes,Eo=Oi.prototype,po=ge(Eo,"cloneNode"),$i=ge(Eo,"nextSibling"),qr=ge(Eo,"childNodes"),Hi=ge(Eo,"parentNode");if("function"==typeof jn){var Dn=Tt.createElement("template");Dn.content&&Dn.content.ownerDocument&&(Tt=Dn.content.ownerDocument)}var Hn=ir(pr,Wt),jt=Hn&&wi?Hn.createHTML(""):"",Ie=Tt.implementation,et=Tt.createNodeIterator,ze=Tt.createDocumentFragment,an=Tt.getElementsByTagName,lt=Wt.importNode,Rt={};try{Rt=$e(Tt).documentMode?Tt.documentMode:{}}catch{}var Pe={};ht.isSupported="function"==typeof Hi&&Ie&&typeof Ie.createHTMLDocument<"u"&&9!==Rt;var 
qn=pn,gr=Rn,Pn=At,_r=qt,Pr=fn,tr=xn,Zn=sn,nr=null,Zt=Te({},[].concat(Or(Et),Or(ot),Or(ct),Or(He),Or(Le))),dn=null,Ge=Te({},[].concat(Or(Pt),Or(it),Or(Xt),Or(cn))),Ot=null,mn=null,wr=!0,Ti=!0,Ci=!1,Ai=!1,Ko=!1,_s=!1,dr=!1,Ni=!1,ti=!1,Vr=!0,wi=!1,ji=!0,Vi=!0,Po=!1,ko={},Ir=null,ro=Te({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]),Vt=null,bn=Te({},["audio","video","img","source","image","track"]),Bn=null,ci=Te({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),_o="http://www.w3.org/1998/Math/MathML",go="http://www.w3.org/2000/svg",es="http://www.w3.org/1999/xhtml",ts=es,jo=!1,ss=void 0,gs=["application/xhtml+xml","text/html"],la=void 0,Ro=null,jl=Tt.createElement("form"),gl=function(zr){Ro&&Ro===zr||((!zr||"object"!==(typeof zr>"u"?"undefined":Kr(zr)))&&(zr={}),zr=$e(zr),nr="ALLOWED_TAGS"in zr?Te({},zr.ALLOWED_TAGS):Zt,dn="ALLOWED_ATTR"in zr?Te({},zr.ALLOWED_ATTR):Ge,Bn="ADD_URI_SAFE_ATTR"in zr?Te($e(ci),zr.ADD_URI_SAFE_ATTR):ci,Vt="ADD_DATA_URI_TAGS"in zr?Te($e(bn),zr.ADD_DATA_URI_TAGS):bn,Ir="FORBID_CONTENTS"in zr?Te({},zr.FORBID_CONTENTS):ro,Ot="FORBID_TAGS"in zr?Te({},zr.FORBID_TAGS):{},mn="FORBID_ATTR"in zr?Te({},zr.FORBID_ATTR):{},ko="USE_PROFILES"in zr&&zr.USE_PROFILES,wr=!1!==zr.ALLOW_ARIA_ATTR,Ti=!1!==zr.ALLOW_DATA_ATTR,Ci=zr.ALLOW_UNKNOWN_PROTOCOLS||!1,Ai=zr.SAFE_FOR_TEMPLATES||!1,Ko=zr.WHOLE_DOCUMENT||!1,Ni=zr.RETURN_DOM||!1,ti=zr.RETURN_DOM_FRAGMENT||!1,Vr=!1!==zr.RETURN_DOM_IMPORT,wi=zr.RETURN_TRUSTED_TYPE||!1,dr=zr.FORCE_BODY||!1,ji=!1!==zr.SANITIZE_DOM,Vi=!1!==zr.KEEP_CONTENT,Po=zr.IN_PLACE||!1,Zn=zr.ALLOWED_URI_REGEXP||Zn,ts=zr.NAMESPACE||es,ss=ss=-1===gs.indexOf(zr.PARSER_MEDIA_TYPE)?"text/html":zr.PARSER_MEDIA_TYPE,la="application/xhtml+xml"===ss?function(io){return io}:$,Ai&&(Ti=!1),ti&&(Ni=!0),ko&&(nr=Te({},[].concat(Or(Le))),dn=[],!0===ko.html&&(Te(nr,Et),Te(dn,Pt)),!0===ko.svg&&(Te(nr,ot),Te(dn,it),Te(dn,cn)),!0===ko.svgFilters&&(Te(nr,ct),Te(dn,it),Te(dn,cn)),!0===ko.mathMl&&(Te(nr,He),Te(dn,Xt),Te(dn,cn))),zr.ADD_TAGS&&(nr===Zt&&(nr=$e(nr)),Te(nr,zr.ADD_TAGS)),zr.ADD_ATTR&&(dn===Ge&&(dn=$e(dn)),Te(dn,zr.ADD_ATTR)),zr.ADD_URI_SAFE_ATTR&&Te(Bn,zr.ADD_URI_SAFE_ATTR),zr.FORBID_CONTENTS&&(Ir===ro&&(Ir=$e(Ir)),Te(Ir,zr.FORBID_CONTENTS)),Vi&&(nr["#text"]=!0),Ko&&Te(nr,["html","head","body"]),nr.table&&(Te(nr,["tbody"]),delete Ot.tbody),e&&e(zr),Ro=zr)},qa=Te({},["mi","mo","mn","ms","mtext"]),da=Te({},["foreignobject","desc","title","annotation-xml"]),$a=Te({},ot);Te($a,ct),Te($a,qe);var Rl=Te({},He);Te(Rl,We);var Ha=function(zr){W(ht.removed,{element:zr});try{zr.parentNode.removeChild(zr)}catch{try{zr.outerHTML=jt}catch{zr.remove()}}},Ts=function(zr,io){try{W(ht.removed,{attribute:io.getAttributeNode(zr),from:io})}catch{W(ht.removed,{attribute:null,from:io})}if(io.removeAttribute(zr),"is"===zr&&!dn[zr])if(Ni||ti)try{Ha(io)}catch{}else try{io.setAttribute(zr,"")}catch{}},hs=function(zr){var io=void 0,gt=void 0;if(dr)zr=""+zr;else{var Tn=J(zr,/^[\r\n\t ]+/);gt=Tn&&Tn[0]}"application/xhtml+xml"===ss&&(zr=''+zr+"");var ie=Hn?Hn.createHTML(zr):zr;if(ts===es)try{io=(new mr).parseFromString(ie,ss)}catch{}if(!io||!io.documentElement){io=Ie.createDocument(ts,"template",null);try{io.documentElement.innerHTML=jo?"":ie}catch{}}var Ze=io.body||io.documentElement;return 
zr&>&&Ze.insertBefore(Tt.createTextNode(gt),Ze.childNodes[0]||null),ts===es?an.call(io,Ko?"html":"body")[0]:Ko?io.documentElement:Ze},$s=function(zr){return et.call(zr.ownerDocument||zr,zr,Wi.SHOW_ELEMENT|Wi.SHOW_COMMENT|Wi.SHOW_TEXT,null,!1)},Ja=function(zr){return"object"===(typeof hr>"u"?"undefined":Kr(hr))?zr instanceof hr:zr&&"object"===(typeof zr>"u"?"undefined":Kr(zr))&&"number"==typeof zr.nodeType&&"string"==typeof zr.nodeName},fa=function(zr,io,gt){Pe[zr]&&D(Pe[zr],function(Tn){Tn.call(ht,io,gt,Ro)})},Xo=function(zr){var io=void 0;if(fa("beforeSanitizeElements",zr,null),function(zr){return!(zr instanceof Ei||zr instanceof ii||"string"==typeof zr.nodeName&&"string"==typeof zr.textContent&&"function"==typeof zr.removeChild&&zr.attributes instanceof kr&&"function"==typeof zr.removeAttribute&&"function"==typeof zr.setAttribute&&"string"==typeof zr.namespaceURI&&"function"==typeof zr.insertBefore)}(zr)||J(zr.nodeName,/[\u0080-\uFFFF]/))return Ha(zr),!0;var gt=la(zr.nodeName);if(fa("uponSanitizeElement",zr,{tagName:gt,allowedTags:nr}),!Ja(zr.firstElementChild)&&(!Ja(zr.content)||!Ja(zr.content.firstElementChild))&&V(/<[/\w]/g,zr.innerHTML)&&V(/<[/\w]/g,zr.textContent)||"select"===gt&&V(/