summaryrefslogtreecommitdiffstats
path: root/ansible_collections/infinidat/infinibox/playbooks
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/infinidat/infinibox/playbooks')
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/ansible.cfg6
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml122
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml50
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml105
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/inventory2
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml295
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml47
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml631
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml109
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml183
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml18
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml234
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml32
13 files changed, 1834 insertions, 0 deletions
diff --git a/ansible_collections/infinidat/infinibox/playbooks/ansible.cfg b/ansible_collections/infinidat/infinibox/playbooks/ansible.cfg
new file mode 100644
index 000000000..2024423d0
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/ansible.cfg
@@ -0,0 +1,6 @@
+[defaults]
+doc_fragment_plugins = ../doc_fragments
+filter_plugins = ../plugins/filters
+library = ../plugins/modules
+module_utils = ../plugins/module_utils
+roles_path = ../roles
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml
new file mode 100644
index 000000000..54978a53e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml
@@ -0,0 +1,122 @@
+---
+### Localhost
+- hosts: forensics
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ vars:
+ network_space: InfiniSafe-Fenced-Network # iSCSI
+ service: ISCSI_SERVICE
+ pool: infinisafe
+ volume: app_vol
+ snap: app_snap
+ host: forensic-validation-host
+ host_iqn: iqn.1993-08.org.debian:01:62ebda3b76cc # io-wt-35
+ ibox_portal: 172.31.32.145
+ ibox: ibox1521
+ ibox_iqn: iqn.2009-11.com.infinidat:storage:infinibox-sn-1521
+ tasks:
+
+ - name: Create {{ service }} network space named {{ network_space }}
+ infini_network_space:
+ name: "{{ network_space }}"
+ state: present
+ service: "{{ service }}"
+ interfaces:
+ - 1680
+ - 1679
+ - 1678
+ netmask: 19
+ network: 172.31.32.0
+ default_gateway: 172.31.63.254
+ # rate_limit: 8
+ # mtu: 1280
+ ips:
+ - 172.31.32.145
+ - 172.31.32.146
+ - 172.31.32.147
+ - 172.31.32.148
+ - 172.31.32.149
+ - 172.31.32.150
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Create forensic host {{ host }}
+ infini_host:
+ name: "{{ host }}"
+ state: present
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Map snapshot {{ snap }} to host {{ host }}
+ infini_map:
+ host: "{{ host }}"
+ volume: "{{ snap }}"
+ state: present
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Add port to host {{ host }}
+ infini_port:
+ host: "{{ host }}"
+ iqns: "{{ host_iqn }}"
+ state: present
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ ### Forensics Host
+ - name: Connect forensics host {{ host }} to Infinibox {{ ibox }}
+ ansible.builtin.shell: |
+ iscsiadm --mode discoverydb --type sendtargets --portal {{ ibox_portal }} --discover
+ iscsiadm --mode node --targetname={{ ibox_iqn }} --op update --name=node.session.auth.username --value={{ user }}
+ iscsiadm --mode discovery --type sendtargets --portal {{ ibox_portal }} --op show
+ iscsiadm --mode node --targetname {{ ibox_iqn }} --portal {{ ibox_portal }} --login
+ rescan-scsi-bus.sh
+ changed_when: false
+ become: true
+
+ # Run forensic tests on snapshot {{ snap }}
+ - name: Forensically test snapshot {{ snap }} is clean using host {{ host }}
+ ansible.builtin.shell: |
+ true
+ changed_when: false
+ register: is_snapshot_clean
+
+ ### Localhost
+ - name: debug
+ ansible.builtin.debug:
+ msg: Snapshot {{ snap }} PASSED testing
+ when: is_snapshot_clean.rc == 0
+ delegate_to: localhost
+
+ - name: debug
+ ansible.builtin.debug:
+ msg: Snapshot {{ snap }} FAILED testing. Do not use this snapshot.
+ when: is_snapshot_clean.rc != 0
+ delegate_to: localhost
+
+ - name: Restoring volume {{ volume }} from known clean snapshot {{ snap }}
+ infini_vol:
+ name: "{{ snap }}"
+ state: present
+ parent_volume_name: "{{ volume }}"
+ volume_type: snapshot
+ restore_volume_from_snapshot: true
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ when: is_snapshot_clean.rc == 0
+ delegate_to: localhost
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml
new file mode 100644
index 000000000..5d58b9741
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml
@@ -0,0 +1,50 @@
+---
+- hosts: localhost
+ gather_facts: true # Required for ansible_date_time
+ collections:
+ - infinidat.infinibox
+ vars:
+ network_space: InfiniSafe-Fenced-Network # iSCSI
+ service: ISCSI_SERVICE
+ pool: infinisafe
+ volume: app_vol
+ snap: app_snap
+ host: forensic-validation-host
+ host_iqn: iqn.1993-08.org.debian:01:62ebda3b76cc # io-wt-35
+ tasks:
+
+ - name: Create pool {{ pool }}
+ infini_pool:
+ name: "{{ pool }}"
+ size: 1TB
+ vsize: 1TB
+ state: present
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Create volume {{ volume }} under pool {{ pool }}
+ infini_vol:
+ name: "{{ volume }}"
+ size: 1GB
+ pool: "{{ pool }}"
+ state: present
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Create and lock (1 minute) snapshot {{ snap }} from volume {{ volume }}
+ infini_vol:
+ name: "{{ snap }}"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ volume }}"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=1) }}"
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml
new file mode 100644
index 000000000..6213f6c08
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml
@@ -0,0 +1,105 @@
+---
+### Localhost
+- hosts: forensics
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ vars:
+ network_space: InfiniSafe-Fenced-Network # iSCSI
+ service: ISCSI_SERVICE
+ pool: infinisafe
+ volume: app_vol
+ snap: app_snap
+ host: forensic-validation-host
+ host_iqn: iqn.1993-08.org.debian:01:62ebda3b76cc # io-wt-35
+ ibox_portal: 172.31.32.145
+ ibox: ibox1521
+ ibox_iqn: iqn.2009-11.com.infinidat:storage:infinibox-sn-1521
+ ibox_portals: 172.31.32.148 172.31.32.146 172.31.32.149 172.31.32.145 172.31.32.150 172.31.32.147
+ tasks:
+
+ - name: Unmap snapshot {{ snap }} from host {{ host }}
+ infini_map:
+ host: "{{ host }}"
+ volume: "{{ snap }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Remove port from host {{ host }}
+ infini_port:
+ host: "{{ host }}"
+ iqns: "{{ host_iqn }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+### Forensics Host
+ - name: Disconnect forensics host {{ host }} from Infinibox {{ ibox }}
+ ansible.builtin.shell: |
+ for i in {{ ibox_portals }}; do
+ iscsiadm --mode node --target {{ ibox_iqn }} -p $i --logout
+ done
+ for i in {{ ibox_portals }}; do
+ iscsiadm --mode discoverydb -t sendtargets -p $i -o delete --discover
+ done
+ changed_when: true
+ become: true
+
+### Localhost
+ - name: Remove network space named {{ network_space }}
+ infini_network_space:
+ name: "{{ network_space }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Remove snapshot {{ snap }} created from volume {{ volume }}
+ infini_vol:
+ name: "{{ snap }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Remove volume {{ volume }} under pool {{ pool }}
+ infini_vol:
+ name: "{{ volume }}"
+ pool: "{{ pool }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Remove pool {{ pool }}
+ infini_pool:
+ name: "{{ pool }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
+
+ - name: Remove forensic host {{ host }}
+ infini_host:
+ name: "{{ host }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ delegate_to: localhost
diff --git a/ansible_collections/infinidat/infinibox/playbooks/inventory b/ansible_collections/infinidat/infinibox/playbooks/inventory
new file mode 100644
index 000000000..15954ef41
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/inventory
@@ -0,0 +1,2 @@
+[forensics]
+io-wt-35.lab.wt.us.infinidat.com ansible_python_interpreter=python3.8
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml
new file mode 100644
index 000000000..cf807fcbb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml
@@ -0,0 +1,295 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol specifying both a cluster and a host
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"both to be provided" not in result.msg'
+ - not result.failed
+
+ - name: NEGATIVE test -> Unmap volume {{ auto_prefix }}vol specifying both a cluster and a host
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"both to be provided" not in result.msg'
+ - not result.failed
+
+ - name: NEGATIVE test -> Stat volume {{ auto_prefix }}vol specifying both a cluster and a host
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"both to be provided" not in result.msg'
+ - not result.failed
+
+ - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ size: 1TB
+ vsize: 1TB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create host {{ auto_prefix }}host1 for cluster mapping
+ infini_host:
+ name: "{{ auto_prefix }}host1"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create host {{ auto_prefix }}host2 for cluster mapping
+ infini_host:
+ name: "{{ auto_prefix }}host2"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create host {{ auto_prefix }}host3 for host mapping
+ infini_host:
+ name: "{{ auto_prefix }}host3"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create cluster {{ auto_prefix }}cluster with hosts 1 and 2 for cluster mapping
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host1"
+ host_cluster_state: present
+ - host_name: "{{ auto_prefix }}host2"
+ host_cluster_state: present
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster again
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"already exists using" not in result.msg'
+ - result.changed
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: cluster_stat
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ cluster_stat }}"
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 again
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"already exists using" not in result.msg'
+ - result.changed
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: host_stat
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ host_stat }}"
+
+ # Since the host is in the cluster and the volume is already mapped to the cluster, mapping the volume to the host becomes a NOOP.
+ - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host1 which is also in {{ auto_prefix }}cluster
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"already exists using" not in result.msg'
+ - result.changed
+
+### Test mapping of volume to a host using specified lun.
+ - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 again using lun 99 when already mapped
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ lun: 99
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: new_lun_fail
+ failed_when:
+ - '"Cannot change the lun" not in new_lun_fail.msg'
+ - result.changed
+
+ - name: POSITIVE test -> Unmap volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 using lun 99 when not already mapped
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ lun: 99
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 to check lun 99 used
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: new_lun_success
+ failed_when:
+ - '"using lun 99" not in new_lun_success.msg'
+ - new_lun_success.volume_lun != 99
+
+ - name: POSITIVE test -> Show lun mapping message
+ ansible.builtin.debug:
+ msg: "Changed lun mapping message: {{ new_lun_success.msg }}"
+### End test mapping of volume to a host using specified lun.
+
+### Test mapping of volume to a cluster using specified lun.
+ - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster again using lun 98 when already mapped
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ lun: 98
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: new_lun_fail
+ failed_when:
+ - '"Cannot change the lun" not in new_lun_fail.msg'
+ - result.changed
+
+ - name: POSITIVE test -> Unmap volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster using lun 98 when not already mapped
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ lun: 98
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster to check lun 98 used
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: new_lun_success
+ failed_when:
+ - '"using lun 98" not in new_lun_success.msg'
+ - new_lun_success.volume_lun != 98
+
+ - name: POSITIVE test -> Show lun mapping message
+ ansible.builtin.debug:
+ msg: "Changed lun mapping message: {{ new_lun_success.msg }}"
+ ### End test mapping of volume to a cluster using specified lun.
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml
new file mode 100644
index 000000000..b8bced550
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml
@@ -0,0 +1,47 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ vars:
+ name: iSCSI
+ service: ISCSI_SERVICE
+ tasks:
+
+ - name: POSITIVE test -> Create {{ service }} network space named {{ name }}
+ infini_network_space:
+ name: "{{ name }}"
+ state: present
+ interfaces:
+ - 1680
+ - 1679
+ - 1678
+ service: "{{ service }}"
+ netmask: 19
+ network: 172.31.32.0
+ default_gateway: 172.31.63.254
+ # rate_limit: 8
+ # mtu: 1280
+ ips:
+ - 172.31.32.145
+ - 172.31.32.146
+ - 172.31.32.147
+ - 172.31.32.148
+ - 172.31.32.149
+ - 172.31.32.150
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: create_out
+
+ - name: debug
+ ansible.builtin.debug:
+ var: create_out
+
+ - name: POSITIVE test -> Stat {{ service }} network space named {{ name }}
+ infini_network_space:
+ name: "{{ name }}"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml
new file mode 100644
index 000000000..3ca9b5542
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml
@@ -0,0 +1,631 @@
+---
+- hosts: localhost
+ gather_facts: true # Required for ansible_date_time
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: NEGATIVE test -> Create file system named {{ auto_prefix }}fs under a pool that does not exist
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool_missing"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'pool_missing' not in result.msg"
+ when: not ansible_check_mode
+
+ - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ size: 1TB
+ vsize: 1TB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create default THIN, file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
+ # See IBox settings: Dataset default provisioning.
+ infini_fs:
+ name: "{{ auto_prefix }}fs_default"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs_default"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: def_stat
+
+ - debug:
+ msg: stat - {{ def_stat }}
+
+ - name: POSITIVE test -> Create THICK file system named {{ auto_prefix }}fs_thick under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs_thick"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ thin_provision: false
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create THIN file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ thin_provision: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Change THIN file system named {{ auto_prefix }}fs to THICK provisioning
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ thin_provision: false
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Change THICK file system named {{ auto_prefix }}fs back to THIN provisioning
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ thin_provision: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Export file system {{ auto_prefix }}fs
+ infini_export:
+ name: "/{{ auto_prefix }}export"
+ filesystem: "{{ auto_prefix }}fs"
+ client_list:
+ - client: "*"
+ access: "RO"
+ no_root_squash: true
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat file system export {{ auto_prefix }}export
+ infini_export:
+ name: "/{{ auto_prefix }}export"
+ filesystem: "{{ auto_prefix }}fs"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Export file system {{ auto_prefix }}fs that does not exist
+ infini_export:
+ name: "/{{ auto_prefix }}export_missing"
+ filesystem: "{{ auto_prefix }}fs_missing"
+ client_list:
+ - client: "*"
+ access: "RO"
+ no_root_squash: true
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('File system.*not found')"
+ when: not ansible_check_mode
+
+ - name: POSITIVE test -> Create export client for export /{{ auto_prefix }}export
+ infini_export_client:
+ export: "/{{ auto_prefix }}export"
+ client: "20.20.20.20"
+ state: present
+ access_mode: "RO"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # TODO - stat export client
+
+ - name: NEGATIVE test -> Create export client for export that does not exist
+ infini_export_client:
+ export: "/{{ auto_prefix }}export_missing"
+ client: 20.20.20.20
+ state: present
+ access_mode: "RO"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('Export.*not found')"
+ when: not ansible_check_mode
+
+ - name: NEGATIVE test -> Create volume {{ auto_prefix }}vol_in_pool_missing under pool that does not exist
+ infini_vol:
+ name: "{{ auto_prefix }}vol_in_pool_missing"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool_missing"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'Pool not found' not in result.msg"
+ # failed_when: "'pool_missing' not in result.msg"
+ when: not ansible_check_mode
+
+ - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create thick volume {{ auto_prefix }}vol_thick under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol_thick"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ thin_provision: false
+ write_protected: true
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ register: output
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat vol serial number
+ infini_vol:
+ state: stat
+ serial: "{{ output.serial }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create snapshot {{ auto_prefix }}vol_snap from volume {{ auto_prefix }}vol
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: DEBUG test -> Find the current date-time (WARNING - Time marks when the playbook gathered facts)
+ ansible.builtin.debug:
+ var=ansible_date_time.iso8601_micro
+
+ # Note: For collection filters, Ansible does not honor the
+ # collections list at the top of this file.
+ # One must use a FQCN for filters such as
+ # infinidat.infinibox.delta_time.
+ - name: POSITIVE test -> Create and lock for 2 minutes snapshot {{ auto_prefix }}vol_snap_locked from volume {{ auto_prefix }}vol
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=2) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: POSITIVE test -> Extend lock to 3 minutes for {{ auto_prefix }}vol_snap_locked without refresh
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_only: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to extend lock without refresh on a snapshot that does not exist.
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked_missing"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_only: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('Snapshot does not exist. Cannot comply')"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days, 31 days exceeds 30 day maximum lock time enforced by infini_vol module (not api)
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked_too_long"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(days=31) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('snapshot_lock_expires_at exceeds.*days in the future')"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to remove locked snapshot {{ auto_prefix }}vol_snap_locked
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'Cannot delete snapshot. Locked' not in result.msg"
+
+ - name: POSITIVE test -> Wait for lock on {{ auto_prefix }}vol_snap_locked to expire
+ ansible.builtin.pause:
+ seconds: 181
+ prompt: Waiting for {{ auto_prefix }}vol_snap_locked to expire
+
+ - name: POSITIVE test -> Remove snapshot {{ auto_prefix }}vol_snap_locked with expired lock
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create host {{ auto_prefix }}host
+ infini_host:
+ name: "{{ auto_prefix }}host"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # Second host used for testing infini_cluster
+ - name: POSITIVE test -> Create host {{ auto_prefix }}host2
+ infini_host:
+ name: "{{ auto_prefix }}host2"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat host {{ auto_prefix }}host
+ infini_host:
+ name: "{{ auto_prefix }}host"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # NOTE: use the auto_prefix variable consistently; these three tasks previously
+  # hard-coded the host name as PSUS_ANSIBLE_host, which breaks whenever
+  # auto_prefix is set to anything else.
+  - name: POSITIVE test -> Add one wwns port to {{ auto_prefix }}host
+    infini_port:
+      host: "{{ auto_prefix }}host"
+      state: present
+      wwns:
+        - "21:00:00:24:ff:78:69:e4"
+      # iqns: []
+      user: "{{ user }}"
+      password: "{{ password }}"
+      system: "{{ system }}"
+
+  - name: POSITIVE test -> Add a second wwn port plus two iscsi ports to {{ auto_prefix }}host
+    infini_port:
+      host: "{{ auto_prefix }}host"
+      state: present
+      wwns:
+        - "21:00:00:24:ff:78:69:e5"
+      iqns:
+        - "iqn.1998-01.com.vmware:5d2f90f6-1987-c06c-4d2f-246e9602d5e0-aaaaaaaa"
+        - "iqn.1998-01.com.vmware:5d2f90f6-1987-c06c-4d2f-246e9602d5e0-bbbbbbbb"
+      user: "{{ user }}"
+      password: "{{ password }}"
+      system: "{{ system }}"
+
+  - name: POSITIVE test -> Stat ports on {{ auto_prefix }}host
+    infini_port:
+      host: "{{ auto_prefix }}host"
+      state: stat
+      user: "{{ user }}"
+      password: "{{ password }}"
+      system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat host {{ auto_prefix }}host after ports added
+ infini_host:
+ name: "{{ auto_prefix }}host"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to host that does not exist
+ infini_map:
+ host: "{{ auto_prefix }}host_missing"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('Neither host.*nor cluster.*found')"
+ when: not ansible_check_mode
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # Rescan the local SCSI bus so the newly mapped volume appears as a device.
+  # 'not found' in stderr is tolerated (rescan-scsi-bus.sh may not be installed).
+  - name: POSITIVE test -> Rescan after mapping
+    ansible.builtin.shell: |
+      rescan-scsi-bus.sh
+    become: true
+    register: rescan
+    failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # Rescan with --remove so stale device nodes for the unmapped volume are cleaned up.
+  # 'not found' in stderr is tolerated (rescan-scsi-bus.sh may not be installed).
+  - name: POSITIVE test -> Rescan with remove option after removing mapping
+    ansible.builtin.shell: |
+      rescan-scsi-bus.sh --remove
+    become: true
+    register: rescan
+    failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+
+ - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host using lun 0
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ lun: 0
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # Rescan the local SCSI bus after mapping with an explicit LUN.
+  # 'not found' in stderr is tolerated (rescan-scsi-bus.sh may not be installed).
+  - name: POSITIVE test -> Rescan after mapping
+    ansible.builtin.shell: |
+      rescan-scsi-bus.sh
+    become: true
+    register: rescan
+    failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+
+ - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host using lun 0
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: check_lun
+ tags:
+ - dev
+
+ - name: ASSERT test -> Check lun 0 set
+ ansible.builtin.assert:
+ that:
+ - check_lun.volume_lun == 0
+ tags:
+ - dev
+
+ - name: POSITIVE test -> Create user {{ auto_prefix }}pool_admin_user with pool_admin role managing pool {{ auto_prefix }}pool
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ user_email: "{{ auto_prefix }}pool_admin_user@example.com"
+ user_password: "secret1"
+ user_role: "pool_admin"
+ user_enabled: "true"
+ user_pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Disable user {{ auto_prefix }}pool_admin_user
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ user_email: "{{ auto_prefix }}pool_admin_user@example.com"
+ user_password: "secret1"
+ user_role: "pool_admin"
+ user_enabled: "false"
+ user_pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat user {{ auto_prefix }}pool_admin_user
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Reset password for user {{ auto_prefix }}pool_admin_user
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ user_password: "secret_new"
+ state: reset_password
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create user {{ auto_prefix }}admin_user with admin role
+ infini_user:
+ user_name: "{{ auto_prefix }}admin_user"
+ user_email: "{{ auto_prefix }}admin_user@example.com"
+ user_password: "secret2"
+ user_role: "admin"
+ user_enabled: "true"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create user {{ auto_prefix }}read_only_user with read_only role
+ infini_user:
+ user_name: "{{ auto_prefix }}read_only_user"
+ user_email: "{{ auto_prefix }}read_only_user@example.com"
+ user_password: "secret3"
+ user_role: "read_only"
+ user_enabled: "true"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create cluster {{ auto_prefix }}cluster with two hosts
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ host_cluster_state: present
+ - host_name: "{{ auto_prefix }}host2"
+ host_cluster_state: present
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove {{ auto_prefix }}host from cluster {{ auto_prefix }}cluster. Leave {{ auto_prefix }}host2.
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ host_cluster_state: absent
+ - host_name: "{{ auto_prefix }}host2"
+ host_cluster_state: present
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Re-add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster. Remove {{ auto_prefix }}host2.
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ host_cluster_state: present
+ - host_name: "{{ auto_prefix }}host2"
+ host_cluster_state: absent
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Re-add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster again. Remove {{ auto_prefix }}host2 again.
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ host_cluster_state: present
+ - host_name: "{{ auto_prefix }}host2"
+ host_cluster_state: absent
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Attempt to add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster with missing host_cluster_state key:value
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ # host_cluster_state: present
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'require host_name and host_cluster_state' not in result.msg"
+
+ - name: NEGATIVE test -> Attempt to add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster with an unsupported key:value
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ cluster_hosts:
+ - host_name: "{{ auto_prefix }}host"
+ host_cluster_state: present
+ host_unknown_key: unknown_value
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'require host_name and host_cluster_state' not in result.msg"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml
new file mode 100644
index 000000000..bb4db264e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml
@@ -0,0 +1,109 @@
+---
+- hosts: localhost
+ gather_facts: true # Required for ansible_date_time
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ size: 1TB
+ vsize: 1TB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create snapshot {{ auto_prefix }}vol_snap from volume {{ auto_prefix }}vol
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # 'var:' must be a proper mapping key; the previous 'var=...' form was an
+  # accidental k=v shorthand scalar under the module name.
+  - name: DEBUG test -> Find the current date-time (WARNING - Time marks when the playbook gathered facts)
+    ansible.builtin.debug:
+      var: ansible_date_time.iso8601_micro
+
+ # Note: For collection filters, Ansible does not honor the
+ # collections list at the top of this file.
+ # One must use a FQCN for filters such as
+ # infinidat.infinibox.delta_time.
+ - name: POSITIVE test -> Create and lock for 2 minutes snapshot {{ auto_prefix }}vol_snap_locked from volume {{ auto_prefix }}vol
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=2) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: POSITIVE test -> Extend lock to 3 minutes for {{ auto_prefix }}vol_snap_locked without refresh
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_only: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days, 31 days exceeds 30 day maximum lock time enforced by infini_vol module (not API)
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked_too_long"
+ state: present
+ volume_type: snapshot
+ parent_volume_name: "{{ auto_prefix }}vol"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(days=31) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('snapshot_lock_expires_at exceeds.*days in the future')"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to remove locked snapshot {{ auto_prefix }}vol_snap_locked
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'Cannot delete snapshot. Locked' not in result.msg"
+
+ - name: POSITIVE test -> Wait for lock on {{ auto_prefix }}vol_snap_locked to expire
+ ansible.builtin.pause:
+ seconds: 181
+ prompt: Waiting for {{ auto_prefix }}vol_snap_locked to expire
+
+ - name: POSITIVE test -> Lock expired. Remove snapshot {{ auto_prefix }}vol_snap_locked.
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap_locked"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml
new file mode 100644
index 000000000..8aaa765fb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml
@@ -0,0 +1,183 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: POSITIVE test -> Stat mapping of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host1
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: host_stat
+ failed_when:
+ - '"vol is mapped to host" not in host_stat.msg'
+ - '"host1" not in host_stat.msg'
+ - host_stat.failed
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ host_stat }}"
+
+ - name: POSITIVE test -> Stat mapping of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: cluster_stat
+ failed_when:
+ - '"vol is mapped to cluster" not in cluster_stat.msg'
+ - cluster_stat.failed
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ cluster_stat }}"
+
+ # WARNING: Assume we have a vol mapped to a cluster with hosts host1 and host2.
+ # Then we unmap the vol from "host1" as shown below.
+ - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host1
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # This will UNMAP the vol from the entire CLUSTER!
+ - name: NEGATIVE test -> Stat removed mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host1
+ infini_map:
+ host: "{{ auto_prefix }}host1"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: host_stat
+ failed_when:
+ - '"mapped to neither" in host_stat.msg'
+ - not host_stat.failed
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ host_stat }}"
+
+ - name: NEGATIVE test -> Stat removed mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: cluster_stat
+ failed_when:
+ - '"mapped to neither host nor cluster" in cluster_stat.msg'
+ - not cluster_stat.failed
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "Map stat: {{ cluster_stat }}"
+
+ - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3 again
+ infini_map:
+ host: "{{ auto_prefix }}host3"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when:
+ - '"was not mapped" not in result.msg'
+ - result.changed
+
+ - name: POSITIVE test -> Show map stat
+ ansible.builtin.debug:
+ msg: "result: {{ result.msg }}"
+
+ - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
+ infini_map:
+ cluster: "{{ auto_prefix }}cluster"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
+    infini_map:
+      cluster: "{{ auto_prefix }}cluster"
+      volume: "{{ auto_prefix }}vol"
+      state: absent
+      user: "{{ user }}"
+      password: "{{ password }}"
+      system: "{{ system }}"
+    # register is required here: without it, failed_when evaluated the stale
+    # 'result' left over from the earlier host3 unmap task, so this idempotency
+    # check never actually inspected this task's outcome.
+    register: result
+    failed_when:
+      - '"was not mapped" not in result.msg'
+      - result.changed
+
+ - name: POSITIVE test -> Remove cluster {{ auto_prefix }}cluster
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove host {{ auto_prefix }}host1
+ infini_host:
+ name: "{{ auto_prefix }}host1"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove host {{ auto_prefix }}host2
+ infini_host:
+ name: "{{ auto_prefix }}host2"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove host {{ auto_prefix }}host3
+ infini_host:
+ name: "{{ auto_prefix }}host3"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml
new file mode 100644
index 000000000..3532008e9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml
@@ -0,0 +1,18 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ vars:
+ name: iSCSI
+ service: iSCSI
+ tasks:
+
+ - name: POSITIVE test -> Remove network space named {{ name }}
+ infini_network_space:
+ name: "{{ name }}"
+ state: absent
+
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml
new file mode 100644
index 000000000..c7cb121df
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml
@@ -0,0 +1,234 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: POSITIVE test -> Remove cluster {{ auto_prefix }}cluster
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove cluster {{ auto_prefix }}cluster again
+ infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+  # Rescan with --remove so stale device nodes for the unmapped volume are cleaned up.
+  # 'not found' in stderr is tolerated (rescan-scsi-bus.sh may not be installed).
+  - name: POSITIVE test -> Rescan with remove option after removing mapping
+    ansible.builtin.shell: |
+      rescan-scsi-bus.sh --remove
+    become: true
+    register: rescan
+    failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+
+ - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host again
+ infini_map:
+ host: "{{ auto_prefix }}host"
+ volume: "{{ auto_prefix }}vol"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove host {{ auto_prefix }}host
+ infini_host:
+ name: "{{ auto_prefix }}host"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove host {{ auto_prefix }}host2
+ infini_host:
+ name: "{{ auto_prefix }}host2"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove host {{ auto_prefix }}host again
+ infini_host:
+ name: "{{ auto_prefix }}host"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove export client for export /{{ auto_prefix }}export
+ infini_export_client:
+ client: 20.20.20.20
+ state: absent
+ access_mode: "RO"
+ export: "/{{ auto_prefix }}export"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove export client for export /{{ auto_prefix }}export again
+ infini_export_client:
+ client: 20.20.20.20
+ state: absent
+ access_mode: "RO"
+ export: "/{{ auto_prefix }}export"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove export {{ auto_prefix }}export of file system {{ auto_prefix }}fs
+ infini_export:
+ name: "/{{ auto_prefix }}export"
+ filesystem: "{{ auto_prefix }}fs"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove export {{ auto_prefix }}export of file system {{ auto_prefix }}fs again
+ infini_export:
+ name: "/{{ auto_prefix }}export"
+ filesystem: "{{ auto_prefix }}fs"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs_default"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs_thick under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs_thick"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool again
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove thick volume {{ auto_prefix }}vol_thick under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol_thick"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool again
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove snapshot {{ auto_prefix }}vol_snap
+ infini_vol:
+ name: "{{ auto_prefix }}vol_snap"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove file system named {{ auto_prefix }}fs again from now missing pool {{ auto_prefix }}pool
+ infini_fs:
+ name: "{{ auto_prefix }}fs"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove user {{ auto_prefix }}read_only_user
+ infini_user:
+ user_name: "{{ auto_prefix }}read_only_user"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove user {{ auto_prefix }}admin_user
+ infini_user:
+ user_name: "{{ auto_prefix }}admin_user"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove user {{ auto_prefix }}pool_admin_user
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove user {{ auto_prefix }}pool_admin_user again
+ infini_user:
+ user_name: "{{ auto_prefix }}pool_admin_user"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml
new file mode 100644
index 000000000..0320d8bd4
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml
@@ -0,0 +1,32 @@
+---
+- hosts: localhost
+ gather_facts: false
+ collections:
+ - infinidat.infinibox
+ tasks:
+
+ - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Remove volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool again
+ infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
+ infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"