author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator
parent     Initial commit.
download   ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.tar.xz
           ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.zip

Adding upstream version 16.2.11+ds. (upstream/16.2.11+ds, upstream)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator')
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/01-hosts.e2e-spec.ts                                    86
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/03-inventory.e2e-spec.ts                                26
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/04-osds.e2e-spec.ts                                     50
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/05-services.e2e-spec.ts                                 36
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature                                 63
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature              26
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature             74
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts  47
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts      41
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts           67
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts                   99
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts                            24
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts                           49
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts                       114
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts                     83
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts                           52
16 files changed, 937 insertions, 0 deletions
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/01-hosts.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/01-hosts.e2e-spec.ts
new file mode 100644
index 000000000..aca36ade1
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/01-hosts.e2e-spec.ts
@@ -0,0 +1,86 @@
+import { HostsPageHelper } from '../cluster/hosts.po';
+
+describe('Hosts page', () => {
+ const hosts = new HostsPageHelper();
+
+ beforeEach(() => {
+ cy.login();
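+ // keep the 'token' cookie set by cy.login() so the session persists across the tests in this spec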
+ Cypress.Cookies.preserveOnce('token');
+ hosts.navigateTo();
+ });
+
+ describe('when Orchestrator is available', () => {
+ beforeEach(function () {
+ cy.fixture('orchestrator/inventory.json').as('hosts');
+ cy.fixture('orchestrator/services.json').as('services');
+ });
+
+ it('should not add an existing host', function () {
+ const hostname = Cypress._.sample(this.hosts).name;
+ hosts.navigateTo('add');
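+ // the second argument is assumed to tell the helper to expect the duplicate-host error instead of a successful add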
+ hosts.add(hostname, true);
+ });
+
+ it('should drain and remove a host and then add it back', function () {
+ const hostname = Cypress._.last(this.hosts)['name'];
+
+ // should drain the host first before deleting
+ hosts.drain(hostname);
+ hosts.remove(hostname);
+
+ // add it back
+ hosts.navigateTo('add');
+ hosts.add(hostname);
+ hosts.checkExist(hostname, true);
+ });
+
+ it('should display inventory', function () {
+ for (const host of this.hosts) {
+ hosts.clickTab('cd-host-details', host.name, 'Physical Disks');
+ cy.get('cd-host-details').within(() => {
+ hosts.expectTableCount('total', host.devices.length);
+ });
+ }
+ });
+
+ it('should display daemons', function () {
+ for (const host of this.hosts) {
+ hosts.clickTab('cd-host-details', host.name, 'Daemons');
+ cy.get('cd-host-details').within(() => {
+ hosts.getTableCount('total').should('be.gte', 0);
+ });
+ }
+ });
+
+ it('should edit host labels', function () {
+ const hostname = Cypress._.sample(this.hosts).name;
+ const labels = ['foo', 'bar'];
+ hosts.editLabels(hostname, labels, true);
+ hosts.editLabels(hostname, labels, false);
+ });
+
+ it('should enter host into maintenance', function () {
+ const hostname = Cypress._.sample(this.hosts).name;
+ const serviceList: string[] = [];
+ this.services.forEach((service: any) => {
+ if (hostname === service.hostname) {
+ serviceList.push(service.daemon_type);
+ }
+ });
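+ // only enter maintenance if no mgr or alertmanager daemon runs on this host, since maintenance mode would disrupt them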
+ let enterMaintenance = true;
+ serviceList.forEach((service: string) => {
+ if (service === 'mgr' || service === 'alertmanager') {
+ enterMaintenance = false;
+ }
+ });
+ if (enterMaintenance) {
+ hosts.maintenance(hostname);
+ }
+ });
+
+ it('should exit host from maintenance', function () {
+ const hostname = Cypress._.sample(this.hosts).name;
+ hosts.maintenance(hostname, true);
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/03-inventory.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/03-inventory.e2e-spec.ts
new file mode 100644
index 000000000..a64e3bc8c
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/03-inventory.e2e-spec.ts
@@ -0,0 +1,26 @@
+import { InventoryPageHelper } from '../cluster/inventory.po';
+
+describe('Physical Disks page', () => {
+ const inventory = new InventoryPageHelper();
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ inventory.navigateTo();
+ });
+
+ it('should have correct devices', () => {
+ cy.fixture('orchestrator/inventory.json').then((hosts) => {
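+ // sum the per-host device counts from the fixture to get the expected total number of disks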
+ const totalDiskCount = Cypress._.sumBy(hosts, 'devices.length');
+ inventory.expectTableCount('total', totalDiskCount);
+ for (const host of hosts) {
+ inventory.filterTable('Hostname', host['name']);
+ inventory.getTableCount('found').should('be.eq', host.devices.length);
+ }
+ });
+ });
+
+ it('should identify device', () => {
+ inventory.identify();
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/04-osds.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/04-osds.e2e-spec.ts
new file mode 100644
index 000000000..41f0933b7
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/04-osds.e2e-spec.ts
@@ -0,0 +1,50 @@
+import { OSDsPageHelper } from '../cluster/osds.po';
+import { DashboardPageHelper } from '../ui/dashboard.po';
+
+describe('OSDs page', () => {
+ const osds = new OSDsPageHelper();
+ const dashboard = new DashboardPageHelper();
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ osds.navigateTo();
+ });
+
+ describe('when Orchestrator is available', () => {
+ it('should create and delete OSDs', () => {
+ osds.getTableCount('total').as('initOSDCount');
+ osds.navigateTo('create');
+ osds.create('hdd');
+
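+ // '@newOSDCount' is expected to be aliased inside osds.create() with the number of OSDs created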
+ cy.get('@newOSDCount').then((newCount) => {
+ cy.get('@initOSDCount').then((oldCount) => {
+ const expectedCount = Number(oldCount) + Number(newCount);
+
+ // check total rows
+ osds.expectTableCount('total', expectedCount);
+
+ // the landing page makes it easier to check the OSD status
+ dashboard.navigateTo();
+ dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} total`);
+ dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} up`);
+ dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} in`);
+
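+ // give the newly created OSDs time to come up and be marked 'in' before asserting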
+ cy.wait(30000);
+ expect(Number(newCount)).to.be.gte(2);
+ // Delete the first OSD we created
+ osds.navigateTo();
+ const deleteOsdId = Number(oldCount);
+ osds.deleteByIDs([deleteOsdId], false);
+ osds.ensureNoOsd(deleteOsdId);
+
+ cy.wait(30000);
+ // Replace the second OSD we created
+ const replaceID = Number(oldCount) + 1;
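+ // passing true is assumed to destroy (replace) the OSD rather than purge it, hence the 'destroyed' status check below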
+ osds.deleteByIDs([replaceID], true);
+ osds.checkStatus(replaceID, ['destroyed']);
+ });
+ });
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/05-services.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/05-services.e2e-spec.ts
new file mode 100644
index 000000000..fb5e6ac89
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/05-services.e2e-spec.ts
@@ -0,0 +1,36 @@
+import { ServicesPageHelper } from '../cluster/services.po';
+
+describe('Services page', () => {
+ const services = new ServicesPageHelper();
+ const serviceName = 'rgw.foo';
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ services.navigateTo();
+ });
+
+ describe('when Orchestrator is available', () => {
+ it('should create an rgw service', () => {
+ services.navigateTo('create');
+ services.addService('rgw');
+
+ services.checkExist(serviceName, true);
+ });
+
+ it('should edit a service', () => {
+ const count = '2';
+ services.editService(serviceName, count);
+ services.expectPlacementCount(serviceName, count);
+ });
+
+ it('should create and delete an ingress service', () => {
+ services.navigateTo('create');
+ services.addService('ingress');
+
+ services.checkExist('ingress.rgw.foo', true);
+
+ services.deleteService('ingress.rgw.foo');
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature
new file mode 100644
index 000000000..62476ad25
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature
@@ -0,0 +1,63 @@
+Feature: Grafana panels
+
+ Go to some of the Grafana performance sections and check
+ that the panels are populated without any issues
+
+ Background: Log in
+ Given I am logged in
+
+ Scenario Outline: Hosts Overall Performance
+ Given I am on the "hosts" page
+ When I go to the "Overall Performance" tab
+ Then I should see the grafana panel "<panel>"
+ When I view the grafana panel "<panel>"
+ Then I should not see "No Data" in the panel "<panel>"
+
+ Examples:
+ | panel |
+ | OSD Hosts |
+ | AVG CPU Busy |
+ | AVG RAM Utilization |
+ | Physical IOPS |
+ | AVG Disk Utilization |
+ | Network Load |
+ | CPU Busy - Top 10 Hosts |
+ | Network Load - Top 10 Hosts |
+
+ Scenario Outline: RGW Daemon Overall Performance
+ Given I am on the "rgw daemons" page
+ When I go to the "Overall Performance" tab
+ Then I should see the grafana panel "<panel>"
+ When I view the grafana panel "<panel>"
+ Then I should not see No Data in the graph "<panel>"
+ And I should see the legends "<legends>" in the graph "<panel>"
+
+ Examples:
+ | panel | legends |
+ | Total Requests/sec by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 |
+ | GET Latencies by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 |
+ | Bandwidth by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 |
+ | PUT Latencies by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 |
+ | Average GET/PUT Latencies | GET AVG, PUT AVG |
+ | Bandwidth Consumed by Type | GETs, PUTs |
+
+ Scenario Outline: RGW per Daemon Performance
+ Given I am on the "rgw daemons" page
+ When I expand the row "<name>"
+ And I go to the "Performance Details" tab
+ Then I should see the grafana panel "<panel>"
+ When I view the grafana panel "<panel>"
+ Then I should not see No Data in the graph "<panel>"
+ And I should see the legends "<name>" in the graph "<panel>"
+
+ Examples:
+ | name | panel |
+ | foo.ceph-node-00 | Bandwidth by HTTP Operation |
+ | foo.ceph-node-00 | HTTP Request Breakdown |
+ | foo.ceph-node-00 | Workload Breakdown |
+ | foo.ceph-node-01 | Bandwidth by HTTP Operation |
+ | foo.ceph-node-01 | HTTP Request Breakdown |
+ | foo.ceph-node-01 | Workload Breakdown |
+ | foo.ceph-node-02 | Bandwidth by HTTP Operation |
+ | foo.ceph-node-02 | HTTP Request Breakdown |
+ | foo.ceph-node-02 | Workload Breakdown |
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature
new file mode 100644
index 000000000..6ba2fc4fc
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature
@@ -0,0 +1,26 @@
+Feature: Cluster expansion welcome screen
+
+ Go to the welcome screen and decide whether
+ to proceed to the wizard or skip to the landing page
+
+ Background: Login
+ Given I am logged in
+
+ Scenario: Cluster expansion welcome screen
+ Given I am on the "welcome" page
+ And I should see a button to "Expand Cluster"
+ And I should see a button to "Skip"
+ And I should see a message "Please expand your cluster first"
+
+ Scenario: Go to the Cluster expansion wizard
+ Given I am on the "welcome" page
+ And I should see a button to "Expand Cluster"
+ When I click on "Expand Cluster" button
+ Then I am on the "Add Hosts" section
+
+ Scenario: Skip the process and go to the landing page
+ Given I am on the "welcome" page
+ And I should see a button to "Skip"
+ When I click on "Skip" button
+ And I confirm to "Continue"
+ Then I should be on the "dashboard" page
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature
new file mode 100644
index 000000000..93c10833d
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature
@@ -0,0 +1,74 @@
+Feature: Cluster expansion host addition
+
+ Add some hosts, perform host-related actions such as editing the labels
+ and removing hosts from the cluster, and verify that all of these actions
+ are performed as expected
+
+ Background: Cluster expansion wizard
+ Given I am logged in
+ And I am on the "welcome" page
+ And I click on "Expand Cluster" button
+
+ Scenario Outline: Add hosts
+ Given I am on the "Add Hosts" section
+ When I click on "Add" button
+ And enter "hostname" "<hostname>"
+ And select options "<labels>"
+ And I click on submit button
+ Then I should see a row with "<hostname>"
+ And I should see row "<hostname>" have "<labels>"
+
+ Examples:
+ | hostname | labels |
+ | ceph-node-01 | mon, mgr |
+ | ceph-node-02 ||
+
+ Scenario Outline: Remove hosts
+ Given I am on the "Add Hosts" section
+ And I should see a row with "<hostname>"
+ When I select a row "<hostname>"
+ And I click on "Remove" button from the table actions
+ Then I should see the modal
+ And I check the tick box in modal
+ And I click on "Remove Host" button
+ Then I should not see the modal
+ And I should not see a row with "<hostname>"
+
+ Examples:
+ | hostname |
+ | ceph-node-01 |
+ | ceph-node-02 |
+
+ Scenario: Add hosts using pattern 'ceph-node-[01-02]'
+ Given I am on the "Add Hosts" section
+ When I click on "Add" button
+ And enter "hostname" "ceph-node-[01-02]"
+ And I click on submit button
+ Then I should see rows with following entries
+ | hostname |
+ | ceph-node-01 |
+ | ceph-node-02 |
+
+ Scenario: Add existing host and verify it fails
+ Given I am on the "Add Hosts" section
+ And I should see a row with "ceph-node-00"
+ When I click on "Add" button
+ And enter "hostname" "ceph-node-00"
+ Then I should see an error in "hostname" field
+
+ Scenario Outline: Add and remove labels on host
+ Given I am on the "Add Hosts" section
+ When I select a row "<hostname>"
+ And I click on "Edit" button from the table actions
+ And "add" option "<labels>"
+ And I click on submit button
+ Then I should see row "<hostname>" have "<labels>"
+ When I select a row "<hostname>"
+ And I click on "Edit" button from the table actions
+ And "remove" option "<labels>"
+ And I click on submit button
+ Then I should see row "<hostname>" does not have "<labels>"
+
+ Examples:
+ | hostname | labels |
+ | ceph-node-01 | foo |
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts
new file mode 100644
index 000000000..7668cafcf
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts
@@ -0,0 +1,47 @@
+/* tslint:disable*/
+import {
+ CreateClusterServicePageHelper,
+ CreateClusterWizardHelper
+} from '../../cluster/create-cluster.po';
+/* tslint:enable*/
+
+describe('Create cluster create services page', () => {
+ const createCluster = new CreateClusterWizardHelper();
+ const createClusterServicePage = new CreateClusterServicePageHelper();
+
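+ // helper: open the service creation form from the table toolbar, add the service and verify it shows up in the list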
+ const createService = (serviceType: string, serviceName: string, count = '1') => {
+ cy.get('[aria-label=Create]').first().click();
+ createClusterServicePage.addService(serviceType, false, count);
+ createClusterServicePage.checkExist(serviceName, true);
+ };
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ createCluster.navigateTo();
+ createCluster.createCluster();
+ cy.get('.nav-link').contains('Create Services').click();
+ });
+
+ it('should check if title contains Create Services', () => {
+ cy.get('.title').should('contain.text', 'Create Services');
+ });
+
+ describe('when Orchestrator is available', () => {
+ const serviceName = 'mds.test';
+
+ it('should create an mds service', () => {
+ createService('mds', serviceName, '1');
+ });
+
+ it('should edit a service', () => {
+ const daemonCount = '2';
+ createClusterServicePage.editService(serviceName, daemonCount);
+ createClusterServicePage.expectPlacementCount(serviceName, daemonCount);
+ });
+
+ it('should delete mds service', () => {
+ createClusterServicePage.deleteService('mds.test');
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts
new file mode 100644
index 000000000..a82be9855
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts
@@ -0,0 +1,41 @@
+/* tslint:disable*/
+import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po';
+import { OSDsPageHelper } from '../../cluster/osds.po';
+/* tslint:enable*/
+
+const osds = new OSDsPageHelper();
+
+describe('Create cluster create osds page', () => {
+ const createCluster = new CreateClusterWizardHelper();
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ createCluster.navigateTo();
+ createCluster.createCluster();
+ cy.get('.nav-link').contains('Create OSDs').click();
+ });
+
+ it('should check if title contains Create OSDs', () => {
+ cy.get('.title').should('contain.text', 'Create OSDs');
+ });
+
+ describe('when Orchestrator is available', () => {
+ it('should create OSDs', () => {
+ const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02'];
+ for (const hostname of hostnames) {
+ osds.create('hdd', hostname, true);
+
+ // Go to the Review section and expand the cluster,
+ // because the drive group spec is only stored in the
+ // frontend and would be lost if the page were refreshed
+ cy.get('.nav-link').contains('Review').click();
+ cy.get('button[aria-label="Next"]').click();
+ cy.get('cd-dashboard').should('exist');
+ createCluster.navigateTo();
+ createCluster.createCluster();
+ cy.get('.nav-link').contains('Create OSDs').click();
+ }
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts
new file mode 100644
index 000000000..f93ad7a97
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts
@@ -0,0 +1,67 @@
+/* tslint:disable*/
+import {
+ CreateClusterHostPageHelper,
+ CreateClusterWizardHelper
+} from '../../cluster/create-cluster.po';
+/* tslint:enable*/
+
+describe('Create Cluster Review page', () => {
+ const createCluster = new CreateClusterWizardHelper();
+ const createClusterHostPage = new CreateClusterHostPageHelper();
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ createCluster.navigateTo();
+ createCluster.createCluster();
+
+ cy.get('.nav-link').contains('Review').click();
+ });
+
+ describe('navigation link test', () => {
+ it('should check if active nav-link is of Review section', () => {
+ cy.get('.nav-link.active').should('contain.text', 'Review');
+ });
+ });
+
+ describe('fields check', () => {
+ it('should check cluster resources table is present', () => {
+ // check for table header 'Cluster Resources'
+ createCluster.getLegends().its(0).should('have.text', 'Cluster Resources');
+
+ // check for fields in table
+ createCluster.getStatusTables().should('contain.text', 'Hosts');
+ createCluster.getStatusTables().should('contain.text', 'Storage Capacity');
+ createCluster.getStatusTables().should('contain.text', 'CPUs');
+ createCluster.getStatusTables().should('contain.text', 'Memory');
+ });
+
+ it('should check Host Details table is present', () => {
+ // check that only the Host Details table is present
+ createCluster.getDataTables().should('have.length', 1);
+
+ // verify correct columns on Host Details table
+ createCluster.getDataTableHeaders(0).contains('Hostname');
+
+ createCluster.getDataTableHeaders(0).contains('Labels');
+
+ createCluster.getDataTableHeaders(0).contains('CPUs');
+
+ createCluster.getDataTableHeaders(0).contains('Cores');
+
+ createCluster.getDataTableHeaders(0).contains('Total Memory');
+
+ createCluster.getDataTableHeaders(0).contains('Raw Capacity');
+
+ createCluster.getDataTableHeaders(0).contains('HDDs');
+
+ createCluster.getDataTableHeaders(0).contains('Flash');
+
+ createCluster.getDataTableHeaders(0).contains('NICs');
+ });
+
+ it('should check default host name is present', () => {
+ createClusterHostPage.check_for_host();
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts
new file mode 100644
index 000000000..589cbaa90
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts
@@ -0,0 +1,99 @@
+/* tslint:disable*/
+import { Input, ManagerModulesPageHelper } from '../../cluster/mgr-modules.po';
+import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po';
+import { HostsPageHelper } from '../../cluster/hosts.po';
+import { ServicesPageHelper } from '../../cluster/services.po';
+/* tslint:enable*/
+
+describe('when cluster creation is completed', () => {
+ const createCluster = new CreateClusterWizardHelper();
+ const services = new ServicesPageHelper();
+ const hosts = new HostsPageHelper();
+ const mgrmodules = new ManagerModulesPageHelper();
+
+ const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-03'];
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ });
+
+ it('should redirect to dashboard landing page after cluster creation', () => {
+ createCluster.navigateTo();
+ createCluster.createCluster();
+
+ cy.get('.nav-link').contains('Review').click();
+ cy.get('button[aria-label="Next"]').click();
+ cy.get('cd-dashboard').should('exist');
+ });
+
+ describe('Hosts page', () => {
+ beforeEach(() => {
+ hosts.navigateTo();
+ });
+
+ it('should check if monitoring stacks are running on the root host', () => {
+ const monitoringStack = ['alertmanager', 'grafana', 'node-exporter', 'prometheus'];
+ hosts.clickTab('cd-host-details', 'ceph-node-00', 'Daemons');
+ for (const daemon of monitoringStack) {
+ cy.get('cd-host-details').within(() => {
+ services.checkServiceStatus(daemon);
+ });
+ }
+ });
+
+ // avoid creating node-exporter on the newly added host
+ // to favour the host draining process
+ it('should reduce the count for node-exporter', () => {
+ services.editService('node-exporter', '3');
+ });
+
+ // the Grafana IP address is set to the FQDN by default.
+ // kcli does not work with that, so set the IP manually.
+ it('should change ip address of grafana', { retries: 2 }, () => {
+ const dashboardArr: Input[] = [
+ {
+ id: 'GRAFANA_API_URL',
+ newValue: 'https://192.168.100.100:3000',
+ oldValue: ''
+ }
+ ];
+ mgrmodules.editMgrModule('dashboard', dashboardArr);
+ });
+
+ it('should add one more host', () => {
+ hosts.navigateTo('add');
+ hosts.add(hostnames[3]);
+ hosts.checkExist(hostnames[3], true);
+ });
+
+ it('should have removed "_no_schedule" label', () => {
+ for (const hostname of hostnames) {
+ hosts.checkLabelExists(hostname, ['_no_schedule'], false);
+ }
+ });
+
+ it('should display inventory', () => {
+ hosts.clickTab('cd-host-details', hostnames[1], 'Physical Disks');
+ cy.get('cd-host-details').within(() => {
+ hosts.getTableCount('total').should('be.gte', 0);
+ });
+ });
+
+ it('should display daemons', () => {
+ hosts.clickTab('cd-host-details', hostnames[1], 'Daemons');
+ cy.get('cd-host-details').within(() => {
+ hosts.getTableCount('total').should('be.gte', 0);
+ });
+ });
+
+ it('should check if mon daemon is running on all hosts', () => {
+ for (const hostname of hostnames) {
+ hosts.clickTab('cd-host-details', hostname, 'Daemons');
+ cy.get('cd-host-details').within(() => {
+ services.checkServiceStatus('mon');
+ });
+ }
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts
new file mode 100644
index 000000000..a0a1dd032
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts
@@ -0,0 +1,24 @@
+/* tslint:disable*/
+import { OSDsPageHelper } from '../../cluster/osds.po';
+/* tslint:enable*/
+
+describe('OSDs page', () => {
+ const osds = new OSDsPageHelper();
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ osds.navigateTo();
+ });
+
+ it('should check if at least 3 osds are created', { retries: 3 }, () => {
+ // we have created more than 3 osds over the course of
+ // these tests, so ensure that at least 3 osds are listed
+ // in the table. Since OSD creation can take a while,
+ // allow up to 3 retries
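+ // OSD ids are assigned sequentially from 0, so ids 0, 1 and 2 should all be present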
+ for (let id = 0; id < 3; id++) {
+ osds.checkStatus(id, ['in', 'up']);
+ }
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts
new file mode 100644
index 000000000..374ecdb0c
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts
@@ -0,0 +1,49 @@
+/* tslint:disable*/
+import { HostsPageHelper } from '../../cluster/hosts.po';
+import { ServicesPageHelper } from '../../cluster/services.po';
+/* tslint:enable*/
+
+describe('Host Page', () => {
+ const hosts = new HostsPageHelper();
+ const services = new ServicesPageHelper();
+
+ const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-03'];
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ hosts.navigateTo();
+ });
+
+ // rgw is needed for testing the force maintenance
+ it('should create rgw services', () => {
+ services.navigateTo('create');
+ services.addService('rgw', false, '4');
+ services.checkExist('rgw.foo', true);
+ });
+
+ it('should check if rgw daemon is running on all hosts', () => {
+ for (const hostname of hostnames) {
+ hosts.clickTab('cd-host-details', hostname, 'Daemons');
+ cy.get('cd-host-details').within(() => {
+ services.checkServiceStatus('rgw');
+ });
+ }
+ });
+
+ it('should force maintenance and exit', () => {
+ hosts.maintenance(hostnames[3], true, true);
+ });
+
+ it('should drain, remove and add the host back', () => {
+ hosts.drain(hostnames[3]);
+ hosts.remove(hostnames[3]);
+ hosts.navigateTo('add');
+ hosts.add(hostnames[3]);
+ hosts.checkExist(hostnames[3], true);
+ });
+
+ it('should show the exact count of daemons', () => {
+ hosts.checkServiceInstancesExist(hostnames[0], ['mgr: 1', 'prometheus: 1']);
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts
new file mode 100644
index 000000000..ed9ffb989
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts
@@ -0,0 +1,114 @@
+/* tslint:disable*/
+import { ServicesPageHelper } from '../../cluster/services.po';
+/* tslint:enable*/
+
+describe('Services page', () => {
+ const services = new ServicesPageHelper();
+ const mdsDaemonName = 'mds.test';
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ services.navigateTo();
+ });
+
+ it('should check if rgw service is created', () => {
+ services.checkExist('rgw.foo', true);
+ });
+
+ it('should create an mds service', () => {
+ services.navigateTo('create');
+ services.addService('mds', false);
+ services.checkExist(mdsDaemonName, true);
+
+ services.clickServiceTab(mdsDaemonName, 'Details');
+ cy.get('cd-service-details').within(() => {
+ services.checkServiceStatus(mdsDaemonName);
+ });
+ });
+
+ it('should stop a daemon', () => {
+ services.clickServiceTab(mdsDaemonName, 'Details');
+ services.checkServiceStatus(mdsDaemonName);
+
+ services.daemonAction('mds', 'stop');
+ services.checkServiceStatus(mdsDaemonName, 'stopped');
+ });
+
+ it('should restart a daemon', () => {
+ services.checkExist(mdsDaemonName, true);
+ services.clickServiceTab(mdsDaemonName, 'Details');
+ services.daemonAction('mds', 'restart');
+ services.checkServiceStatus(mdsDaemonName, 'running');
+ });
+
+ it('should redeploy a daemon', () => {
+ services.checkExist(mdsDaemonName, true);
+ services.clickServiceTab(mdsDaemonName, 'Details');
+
+ services.daemonAction('mds', 'stop');
+ services.checkServiceStatus(mdsDaemonName, 'stopped');
+ services.daemonAction('mds', 'redeploy');
+ services.checkServiceStatus(mdsDaemonName, 'running');
+ });
+
+ it('should start a daemon', () => {
+ services.checkExist(mdsDaemonName, true);
+ services.clickServiceTab(mdsDaemonName, 'Details');
+
+ services.daemonAction('mds', 'stop');
+ services.checkServiceStatus(mdsDaemonName, 'stopped');
+ services.daemonAction('mds', 'start');
+ services.checkServiceStatus(mdsDaemonName, 'running');
+ });
+
+ it('should delete an mds service', () => {
+ services.deleteService(mdsDaemonName);
+ });
+
+ it('should create and delete snmp-gateway service with version V2c', () => {
+ services.navigateTo('create');
+ services.addService('snmp-gateway', false, '1', 'V2c');
+ services.checkExist('snmp-gateway', true);
+
+ services.clickServiceTab('snmp-gateway', 'Details');
+ cy.get('cd-service-details').within(() => {
+ services.checkServiceStatus('snmp-gateway');
+ });
+
+ services.deleteService('snmp-gateway');
+ });
+
+ it('should create and delete snmp-gateway service with version V3', () => {
+ services.navigateTo('create');
+ services.addService('snmp-gateway', false, '1', 'V3', true);
+ services.checkExist('snmp-gateway', true);
+
+ services.clickServiceTab('snmp-gateway', 'Details');
+ cy.get('cd-service-details').within(() => {
+ services.checkServiceStatus('snmp-gateway');
+ });
+
+ services.deleteService('snmp-gateway');
+ });
+
+ it('should create and delete snmp-gateway service with version V3 and w/o privacy protocol', () => {
+ services.navigateTo('create');
+ services.addService('snmp-gateway', false, '1', 'V3', false);
+ services.checkExist('snmp-gateway', true);
+
+ services.clickServiceTab('snmp-gateway', 'Details');
+ cy.get('cd-service-details').within(() => {
+ services.checkServiceStatus('snmp-gateway');
+ });
+
+ services.deleteService('snmp-gateway');
+ });
+
+ it('should create ingress as unmanaged', () => {
+ services.navigateTo('create');
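+ // the trailing boolean is assumed to set the 'unmanaged' flag on the service form, as verified below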
+ services.addService('ingress', false, undefined, undefined, undefined, true);
+ services.checkExist('ingress.rgw.foo', true);
+ services.isUnmanaged('ingress.rgw.foo', true);
+ services.deleteService('ingress.rgw.foo');
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts
new file mode 100644
index 000000000..f4b5499f0
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts
@@ -0,0 +1,83 @@
+/* tslint:disable*/
+import { ServicesPageHelper } from '../../cluster/services.po';
+import { NFSPageHelper } from '../../orchestrator/workflow/nfs/nfs-export.po';
+import { BucketsPageHelper } from '../../rgw/buckets.po';
+/* tslint:enable*/
+
+describe('nfsExport page', () => {
+ const nfsExport = new NFSPageHelper();
+ const services = new ServicesPageHelper();
+ const buckets = new BucketsPageHelper();
+ const bucketName = 'e2e.nfs.bucket';
+ // @TODO: uncomment this when a CephFS volume can be created through Dashboard.
+ // const fsPseudo = '/fsPseudo';
+ const rgwPseudo = '/rgwPseudo';
+ const editPseudo = '/editPseudo';
+ const backends = ['CephFS', 'Object Gateway'];
+ const squash = 'no_root_squash';
+ const client: object = { addresses: '192.168.0.10' };
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ nfsExport.navigateTo();
+ });
+
+ describe('breadcrumb test', () => {
+ it('should open and show breadcrumb', () => {
+ nfsExport.expectBreadcrumbText('NFS');
+ });
+ });
+
+ describe('Create, edit and delete', () => {
+ it('should create an NFS cluster', () => {
+ services.navigateTo('create');
+
+ services.addService('nfs');
+
+ services.checkExist('nfs.testnfs', true);
+ services.getExpandCollapseElement().click();
+ services.checkServiceStatus('nfs');
+ });
+
+ it('should create an nfs-export with RGW backend', () => {
+ buckets.navigateTo('create');
+ buckets.create(bucketName, 'dashboard', 'default-placement');
+
+ nfsExport.navigateTo();
+ nfsExport.existTableCell(rgwPseudo, false);
+ nfsExport.navigateTo('create');
+ nfsExport.create(backends[1], squash, client, rgwPseudo, bucketName);
+ nfsExport.existTableCell(rgwPseudo);
+ });
+
+ // @TODO: uncomment this when a CephFS volume can be created through Dashboard.
+ // it('should create a nfs-export with CephFS backend', () => {
+ // nfsExport.navigateTo();
+ // nfsExport.existTableCell(fsPseudo, false);
+ // nfsExport.navigateTo('create');
+ // nfsExport.create(backends[0], squash, client, fsPseudo);
+ // nfsExport.existTableCell(fsPseudo);
+ // });
+
+ it('should show Clients', () => {
+ nfsExport.clickTab('cd-nfs-details', rgwPseudo, 'Clients (1)');
+ cy.get('cd-nfs-details').within(() => {
+ nfsExport.getTableCount('total').should('be.gte', 0);
+ });
+ });
+
+ it('should edit an export', () => {
+ nfsExport.editExport(rgwPseudo, editPseudo);
+
+ nfsExport.existTableCell(editPseudo);
+ });
+
+ it('should delete exports and bucket', () => {
+ nfsExport.delete(editPseudo);
+
+ buckets.navigateTo();
+ buckets.delete(bucketName);
+ });
+ });
+});
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts
new file mode 100644
index 000000000..c700ef058
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts
@@ -0,0 +1,52 @@
+/* tslint:disable*/
+import { PageHelper } from '../../../page-helper.po';
+/* tslint:enable*/
+
+const pages = {
+ index: { url: '#/nfs', id: 'cd-nfs-list' },
+ create: { url: '#/nfs/create', id: 'cd-nfs-form' }
+};
+
+export class NFSPageHelper extends PageHelper {
+ pages = pages;
+
+ @PageHelper.restrictTo(pages.create.url)
+ create(backend: string, squash: string, client: object, pseudo: string, rgwPath?: string) {
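+ // select the NFS cluster backing the export; 'testnfs' matches the nfs service created earlier in the workflow tests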
+ this.selectOption('cluster_id', 'testnfs');
+ // select a storage backend
+ this.selectOption('name', backend);
+ if (backend === 'CephFS') {
+ this.selectOption('fs_name', 'myfs');
+
+ cy.get('#security_label').click({ force: true });
+ } else {
+ cy.get('input[data-testid=rgw_path]').type(rgwPath);
+ }
+
+ cy.get('input[name=pseudo]').type(pseudo);
+ this.selectOption('squash', squash);
+
+ // Add clients
+ cy.get('button[name=add_client]').click({ force: true });
+ cy.get('input[name=addresses]').type(client['addresses']);
+
+ // Check if we can remove clients and add it again
+ cy.get('span[name=remove_client]').click({ force: true });
+ cy.get('button[name=add_client]').click({ force: true });
+ cy.get('input[name=addresses]').type(client['addresses']);
+
+ cy.get('cd-submit-button').click();
+ }
+
+ editExport(pseudo: string, editPseudo: string) {
+ this.navigateEdit(pseudo);
+
+ cy.get('input[name=pseudo]').clear().type(editPseudo);
+
+ cy.get('cd-submit-button').click();
+
+ // Click the export and check its details table for updated content
+ this.getExpandCollapseElement(editPseudo).click();
+ cy.get('.active.tab-pane').should('contain.text', editPseudo);
+ }
+}