summaryrefslogtreecommitdiffstats
path: root/src/test/behave_tests/features
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
commite6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree64f88b554b444a49f656b6c656111a145cbbaa28 /src/test/behave_tests/features
parentInitial commit. (diff)
downloadceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2.upstream/18.2.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/behave_tests/features')
-rw-r--r--src/test/behave_tests/features/ceph_osd_test.feature49
-rw-r--r--src/test/behave_tests/features/ceph_shell_test.feature64
-rw-r--r--src/test/behave_tests/features/cephadm_test.feature24
-rw-r--r--src/test/behave_tests/features/environment.py207
-rw-r--r--src/test/behave_tests/features/kcli_handler.py88
-rw-r--r--src/test/behave_tests/features/steps/ceph_steps.py106
-rw-r--r--src/test/behave_tests/features/validation_util.py19
7 files changed, 557 insertions, 0 deletions
diff --git a/src/test/behave_tests/features/ceph_osd_test.feature b/src/test/behave_tests/features/ceph_osd_test.feature
new file mode 100644
index 000000000..e9a37a4c9
--- /dev/null
+++ b/src/test/behave_tests/features/ceph_osd_test.feature
@@ -0,0 +1,49 @@
+@osd
+Feature: Tests related to OSD creation
+ In order to be able to provide storage services
+ As a system administrator
+ I want to install a Ceph cluster in the following server infrastructure:
+ - 3 nodes with 8Gb RAM, 4 CPUs, and 3 storage devices of 20Gb each.
+ - Using Fedora32 image in each node
+ - Configure ceph cluster in following way
+ - with number of OSD 0
+
+
+ Scenario: Create OSDs
+ Given I log as root into ceph-node-00
+ When I execute in cephadm_shell
+ """
+ ceph orch device ls
+ """
+ Then I wait for 60 seconds until I get
+ """
+ ceph-node-00.cephlab.com /dev/vdb hdd Unknown N/A N/A Yes
+ ceph-node-01.cephlab.com /dev/vdb hdd Unknown N/A N/A Yes
+ ceph-node-02.cephlab.com /dev/vdb hdd Unknown N/A N/A Yes
+ """
+ Then I execute in cephadm_shell
+ """
+ ceph orch daemon add osd ceph-node-00.cephlab.com:/dev/vdb
+ ceph orch daemon add osd ceph-node-01.cephlab.com:/dev/vdb
+ ceph orch daemon add osd ceph-node-02.cephlab.com:/dev/vdb
+ """
+ Then I execute in cephadm_shell
+ """
+ ceph orch device ls
+ """
+ Then I wait for 60 seconds until I get
+ """
+ ceph-node-00.cephlab.com /dev/vdb hdd Unknown N/A N/A No
+ ceph-node-01.cephlab.com /dev/vdb hdd Unknown N/A N/A No
+ ceph-node-02.cephlab.com /dev/vdb hdd Unknown N/A N/A No
+ """
+ Then I execute in cephadm_shell
+ """
+ ceph -s
+ """
+ Then I get results which contain
+ """
+ services:
+ mon: 3 daemons, quorum ceph-node-00.cephlab.com,ceph-node-01,ceph-node-02
+ osd: 3 osds: 3 up
+ """
diff --git a/src/test/behave_tests/features/ceph_shell_test.feature b/src/test/behave_tests/features/ceph_shell_test.feature
new file mode 100644
index 000000000..b158093a0
--- /dev/null
+++ b/src/test/behave_tests/features/ceph_shell_test.feature
@@ -0,0 +1,64 @@
+@ceph_shell
+Feature: Testing basic ceph shell commands
+ In order to be able to provide storage services
+ As a system administrator
+ I want to install a Ceph cluster in the following server infrastructure:
+ - 3 nodes with 8Gb RAM, 4 CPUs, and 3 storage devices of 20Gb each.
+ - Using Fedora32 image in each node
+
+
+ Scenario: Execute ceph command to check status
+ Given I log as root into ceph-node-00
+ When I execute in cephadm_shell
+ """
+ ceph orch status
+ """
+ Then I get results which contain
+ """
+ Backend: cephadm
+ Available: Yes
+ Paused: No
+ """
+
+
+ Scenario: Execute ceph command to check orch host list
+ Given I log as root into ceph-node-00
+ When I execute in cephadm_shell
+ """
+ ceph orch host ls
+ """
+ Then I get results which contain
+ """
+ HOST LABELS
+ ceph-node-00.cephlab.com _admin
+ """
+
+
+ Scenario: Execute ceph command to check orch device list
+ Given I log as root into ceph-node-00
+ When I execute in cephadm_shell
+ """
+ ceph orch device ls
+ """
+ Then I get results which contain
+ """
+ Hostname Path Type
+ ceph-node-00.cephlab.com /dev/vdb hdd
+ ceph-node-00.cephlab.com /dev/vdc hdd
+ """
+
+
+ Scenario: Execute ceph command to check orch
+ Given I log as root into ceph-node-00
+ When I execute in cephadm_shell
+ """
+ ceph orch ls
+ """
+ Then I wait for 60 seconds until I get
+ """
+ NAME RUNNING
+ grafana 1/1
+ mgr 2/2
+ mon 1/5
+ prometheus 1/1
+ """
diff --git a/src/test/behave_tests/features/cephadm_test.feature b/src/test/behave_tests/features/cephadm_test.feature
new file mode 100644
index 000000000..e3358bfbd
--- /dev/null
+++ b/src/test/behave_tests/features/cephadm_test.feature
@@ -0,0 +1,24 @@
+@cephadm
+Feature: Install a basic Ceph cluster
+ In order to be able to provide storage services
+ As a system administrator
+ I want to install a Ceph cluster in the following server infrastructure:
+ - 3 nodes with 8Gb RAM, 4 CPUs, and 3 storage devices of 20Gb each.
+ - Using Fedora32 image in each node
+
+
+ Scenario: Execute commands in cluster nodes
+ Given I log as root into ceph-node-00
+ And I execute in host
+ """
+ curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/octopus/src/cephadm/cephadm
+ chmod +x cephadm
+ """
+ When I execute in host
+ """
+ cephadm version
+ """
+ Then I get results which contain
+ """
+ ceph version quincy (dev)
+ """
diff --git a/src/test/behave_tests/features/environment.py b/src/test/behave_tests/features/environment.py
new file mode 100644
index 000000000..fdd175e60
--- /dev/null
+++ b/src/test/behave_tests/features/environment.py
@@ -0,0 +1,207 @@
+import logging
+import os
+import re
+
+from jinja2 import Template
+from kcli_handler import is_bootstrap_script_complete, execute_kcli_cmd
+
+KCLI_PLANS_DIR = "generated_plans"
+KCLI_PLAN_NAME = "behave_test_plan"
+
+Kcli_Config = {
+ "nodes": 1,
+ "pool": "default",
+ "network": "default",
+ "domain": "cephlab.com",
+ "prefix": "ceph",
+ "numcpus": 1,
+ "memory": 1024,
+ "image": "fedora33",
+ "notify": False,
+ "admin_password": "password",
+ "disks": [150, 3],
+}
+
+Bootstrap_Config = {
+ "configure_osd": False
+}
+
+
+def _write_file(file_path, data):
+ with open(file_path, "w") as file:
+ file.write(data)
+
+
+def _read_file(file_path):
+ file = open(file_path, "r")
+ data = "".join(file.readlines())
+ file.close()
+ return data
+
+
+def _loaded_templates():
+ temp_dir = os.path.join(os.getcwd(), "template")
+ logging.info("Loading templates")
+ kcli = _read_file(os.path.join(temp_dir, "kcli_plan_template"))
+ script = _read_file(os.path.join(temp_dir, "bootstrap_script_template"))
+ return (
+ Template(kcli),
+ Template(script)
+ )
+
+
+def _clean_generated(dir_path):
+ logging.info("Deleting generated files")
+ for file in os.listdir(dir_path):
+ os.remove(os.path.join(dir_path, file))
+ os.rmdir(dir_path)
+
+
+def _parse_value(value):
+ if value.isnumeric():
+ return int(value)
+
+ if value.endswith("gb"):
+ return int(value.replace("gb", "")) * 1024
+ elif value.endswith("mb"):
+ return value.replace("mb", "")
+ return value
+
+
+def _parse_to_config_dict(values, config):
+ for key in values.keys():
+ config[key] = _parse_value(values[key])
+
+
+def _parse_vm_description(specs):
+ """
+ Parses a VM specification description into a configuration dictionary.
+ """
+ kcli_config = Kcli_Config.copy()
+ parsed_str = re.search(
+ r"(?P<nodes>[\d]+) nodes with (?P<memory>[\w\.-]+) ram",
+ specs.lower(),
+ )
+ if parsed_str:
+ for spec_key in parsed_str.groupdict().keys():
+ kcli_config[spec_key] = _parse_value(parsed_str.group(spec_key))
+ parsed_str = re.search(r"(?P<numcpus>[\d]+) cpus", specs.lower())
+ if parsed_str:
+ kcli_config["numcpus"] = parsed_str.group("numcpus")
+ parsed_str = re.search(
+ r"(?P<disk>[\d]+) storage devices of (?P<volume>[\w\.-]+)Gb each",
+ specs,
+ )
+ if parsed_str:
+ kcli_config["disks"] = [
+ _parse_value(parsed_str.group("volume"))
+ ] * _parse_value(parsed_str.group("disk"))
+ parsed_str = re.search(r"(?P<image>[\w\.-]+) image", specs.lower())
+ if parsed_str:
+ kcli_config["image"] = parsed_str.group("image")
+ return kcli_config
+
+
+def _parse_ceph_description(specs):
+ """
+ Parse the ceph bootstrap script configuration descriptions.
+ """
+ bootstrap_script_config = Bootstrap_Config.copy()
+ parsed_str = re.search(
+ r"OSD (?P<osd>[\w\.-]+)", specs
+ )
+ if parsed_str:
+ bootstrap_script_config["configure_osd"] = True if _parse_value(
+ parsed_str.group("osd")
+ ) else False
+ return bootstrap_script_config
+
+
+def _handle_kcli_plan(command_type, plan_file_path=None):
+ """
+ Executes the kcli vm create and delete command according
+ to the provided configuration.
+ """
+ op = None
+ if command_type == "create":
+ # TODO : Before creating a kcli plan, check for existing kcli plans
+ op, code = execute_kcli_cmd(
+ f"create plan -f {plan_file_path} {KCLI_PLAN_NAME}"
+ )
+ if code:
+ print(f"Failed to create kcli plan\n Message: {op}")
+ exit(1)
+ elif command_type == "delete":
+ op, code = execute_kcli_cmd(f"delete plan {KCLI_PLAN_NAME} -y")
+ print(op)
+
+
+def has_ceph_configuration(descriptions, config_line):
+ """
+ Checks for ceph cluster configuration in descriptions.
+ """
+ index_config = -1
+ for line in descriptions:
+ if line.lower().startswith(config_line):
+ index_config = descriptions.index(line)
+
+ if index_config != -1:
+ return (
+ descriptions[:index_config],
+ descriptions[index_config:],
+ )
+ return (
+ descriptions,
+ None,
+ )
+
+
+def before_feature(context, feature):
+ kcli_plans_dir_path = os.path.join(
+ os.getcwd(),
+ KCLI_PLANS_DIR,
+ )
+ if not os.path.exists(kcli_plans_dir_path):
+ os.mkdir(kcli_plans_dir_path)
+
+ vm_description, ceph_description = has_ceph_configuration(
+ feature.description,
+ "- configure ceph cluster",
+ )
+ loaded_kcli, loaded_script = _loaded_templates()
+
+ vm_feature_specs = " ".join(
+ [line for line in vm_description if line.startswith("-")]
+ )
+ vm_config = _parse_vm_description("".join(vm_feature_specs))
+ kcli_plan_path = os.path.join(kcli_plans_dir_path, "gen_kcli_plan.yml")
+ print(f"Kcli vm configureaton \n {vm_config}")
+ _write_file(
+ kcli_plan_path,
+ loaded_kcli.render(vm_config)
+ )
+
+ # Checks for ceph description if None set the default configurations
+ ceph_config = _parse_ceph_description(
+ "".join(ceph_description)
+ ) if ceph_description else Bootstrap_Config
+
+ print(f"Bootstrap configuraton \n {ceph_config}\n")
+ _write_file(
+ os.path.join(kcli_plans_dir_path, "bootstrap_cluster_dev.sh"),
+ loaded_script.render(ceph_config),
+ )
+
+ _handle_kcli_plan("create", os.path.relpath(kcli_plan_path))
+
+ if not is_bootstrap_script_complete():
+ print("Failed to complete bootstrap..")
+ _handle_kcli_plan("delete")
+ exit(1)
+ context.last_executed = {}
+
+
+def after_feature(context, feature):
+ if os.path.exists(KCLI_PLANS_DIR):
+ _clean_generated(os.path.abspath(KCLI_PLANS_DIR))
+ _handle_kcli_plan("delete")
diff --git a/src/test/behave_tests/features/kcli_handler.py b/src/test/behave_tests/features/kcli_handler.py
new file mode 100644
index 000000000..1e28c7ff4
--- /dev/null
+++ b/src/test/behave_tests/features/kcli_handler.py
@@ -0,0 +1,88 @@
+import subprocess
+import time
+import os
+
+
+kcli_exec = r"""
+podman run --net host -it --rm --security-opt label=disable
+ -v $HOME/.ssh:/root/.ssh -v $HOME/.kcli:/root/.kcli
+ -v /var/lib/libvirt/images:/var/lib/libvirt/images
+ -v /var/run/libvirt:/var/run/libvirt -v $PWD:/workdir
+ -v /var/tmp:/ignitiondir jolmomar/kcli
+"""
+
+
+def _create_kcli_cmd(command):
+ cmd = kcli_exec.replace("$HOME", os.getenv("HOME"))
+ cmd = cmd.replace("$PWD", os.getenv("PWD"))
+ kcli = cmd.replace("\n", "").split(" ")
+ return kcli + command.split(" ")
+
+
+def is_bootstrap_script_complete():
+ """
+ Checks for status of bootstrap script executions.
+ """
+ timeout = 0
+ command = " ".join(
+ [
+ f'"{cmd}"' for cmd in
+ "journalctl --no-tail --no-pager -t cloud-init".split(" ")
+ ]
+ )
+ cmd = _create_kcli_cmd(
+ f'ssh ceph-node-00 {command} | grep "Bootstrap complete."'
+ )
+ while timeout < 10: # Totally waits for 5 mins before giving up
+ proc = subprocess.run(cmd, capture_output=True, text=True)
+ if "Bootstrap complete." in proc.stdout:
+ print("Bootstrap script completed successfully")
+ return True
+ timeout += 1
+ print("Waiting for bootstrap_cluster script...")
+ print(proc.stdout[len(proc.stdout) - 240:])
+ time.sleep(30)
+ print(
+ f"Timeout reached {30*timeout}. Giving up for boostrap to complete"
+ )
+ return False
+
+
+def execute_kcli_cmd(command):
+ """
+ Executes the kcli command by combining the provided command
+ with kcli executable command.
+ """
+ cmd = _create_kcli_cmd(command)
+ print(f"Executing kcli command : {command}")
+ try:
+ proc = subprocess.run(
+ cmd,
+ capture_output=True,
+ text=True,
+ # env=dict(STORAGE_OPTS=''),
+ )
+ except Exception as ex:
+ print(f"Error executing kcli command\n{ex}")
+
+ op = proc.stderr if proc.stderr else proc.stdout
+ return (op, proc.returncode)
+
+
+def execute_ssh_cmd(vm_name, shell, command):
+ """
+ Executes the provided ssh command on the given VM
+ """
+ if shell == "cephadm_shell":
+ command = f"cephadm shell {command}"
+ sudo_cmd = f"sudo -i {command}".split(" ")
+ sudo_cmd = " ".join([f'"{cmd}"' for cmd in sudo_cmd])
+ cmd = _create_kcli_cmd(f"ssh {vm_name} {sudo_cmd}")
+ print(f"Executing ssh command : {cmd}")
+ try:
+ proc = subprocess.run(cmd, capture_output=True, text=True)
+ except Exception as ex:
+ print(f"Error executing ssh command: {ex}")
+
+ op = proc.stderr if proc.stderr else proc.stdout
+ return (op, proc.returncode)
diff --git a/src/test/behave_tests/features/steps/ceph_steps.py b/src/test/behave_tests/features/steps/ceph_steps.py
new file mode 100644
index 000000000..a96aa48ad
--- /dev/null
+++ b/src/test/behave_tests/features/steps/ceph_steps.py
@@ -0,0 +1,106 @@
+import time
+
+from behave import given, when, then
+from kcli_handler import execute_ssh_cmd
+from validation_util import str_to_list
+
+
+@given("I log as root into {node}")
+def login_to_node(context, node):
+ context.node = node
+
+
+@given("I execute in {shell}")
+def init_step_execute(context, shell):
+ commands = context.text.split("\n")
+ for command in commands:
+ op, code = execute_ssh_cmd(context.node, shell, command)
+ if code:
+ raise Exception("Failed to execute")
+ context.last_executed["cmd"] = command
+ context.last_executed["shell"] = shell
+
+
+@when("I execute in {shell}")
+@then("I execute in {shell}")
+def execute_step(context, shell):
+ if context.node is None:
+ raise Exception("Failed not logged into virtual machine")
+ for command in context.text.split("\n"):
+ output, return_code = execute_ssh_cmd(context.node, shell, command)
+ context.last_executed["cmd"] = command
+ context.last_executed["shell"] = shell
+ if return_code != 0:
+ raise Exception(f"Failed to execute ssh\n Message:{output}")
+ context.output = str_to_list(output)
+ print(f"Executed output : {context.output}")
+
+
+@then("Execute in {shell} only {command}")
+def execute_only_one_step(context, shell, command):
+ """
+ Runs a single command and doesn't use multi-line
+ :params command: given command to execute
+ """
+ if context.node is None:
+ raise Exception("Failed not logged into virtual machine")
+ output, return_code = execute_ssh_cmd(context.node, shell, command)
+ context.last_executed["cmd"] = command
+ context.last_executed["shell"] = shell
+ if return_code != 0:
+ raise Exception(f"Failed to execute ssh\nMessage:{output}")
+ context.output = str_to_list(output)
+ print(f"Executed output : {context.output}")
+
+
+@then("I wait for {time_out:n} seconds until I get")
+def execute_and_wait_until_step(context, time_out):
+ wait_time = int(time_out/4)
+ context.found_all_keywords = False
+ if context.node is None:
+ raise Exception("Failed not logged into virtual machine")
+ exec_shell = context.last_executed['shell']
+ exec_cmd = context.last_executed['cmd']
+ if exec_shell is None and exec_cmd is None:
+ raise Exception("Last executed command not found..")
+
+ expected_output = str_to_list(context.text)
+ while wait_time < time_out and not context.found_all_keywords:
+ found_keys = []
+ context.execute_steps(
+ f"then Execute in {exec_shell} only {exec_cmd}"
+ )
+
+ executed_output = context.output
+ for expected_line in expected_output:
+ for op_line in executed_output:
+ if set(expected_line).issubset(set(op_line)):
+ found_keys.append(" ".join(expected_line))
+
+ if len(found_keys) != len(expected_output):
+ print(f"Waiting for {int(time_out/4)} seconds")
+ time.sleep(int(time_out/4))
+ wait_time += int(time_out/4)
+ else:
+ print("Found all expected keywords")
+ context.found_all_keywords = True
+ break
+ if not context.found_all_keywords:
+ print(
+ f"Timeout reached {time_out}. Giving up on waiting for keywords"
+ )
+
+
+@then("I get results which contain")
+def validation_step(context):
+ expected_keywords = str_to_list(context.text)
+ output_lines = context.output
+
+ for keys_line in expected_keywords:
+ found_keyword = False
+ for op_line in output_lines:
+ if set(keys_line).issubset(set(op_line)):
+ found_keyword = True
+ output_lines.remove(op_line)
+ if not found_keyword:
+ assert False, f"Not found {keys_line}"
diff --git a/src/test/behave_tests/features/validation_util.py b/src/test/behave_tests/features/validation_util.py
new file mode 100644
index 000000000..abe441462
--- /dev/null
+++ b/src/test/behave_tests/features/validation_util.py
@@ -0,0 +1,19 @@
+
+def str_to_list(string):
+ """
+ Converts the string into a list of per-line token lists, removing whitespace
+ """
+ string = string.replace('\t', '\n')
+ return [
+ [
+ key for key in line.split(' ')
+ if key != ''
+ ]
+ for line in string.split('\n')
+ if line != ''
+ ]
+
+
+def assert_str_in_list(keyword_list, output_list):
+ for keyword in keyword_list:
+ assert keyword in output_list, f" Not found {keyword}"