summaryrefslogtreecommitdiffstats
path: root/ansible_collections/ibm/storage_virtualize/playbooks
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/ibm/storage_virtualize/playbooks')
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md88
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml97
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml71
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars52
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars42
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars4
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml119
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml34
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml24
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml74
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml47
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt28
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml203
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt30
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml27
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml79
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt36
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml33
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml147
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt28
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt45
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml143
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml33
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt36
-rw-r--r--ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml29
25 files changed, 1549 insertions, 0 deletions
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md
new file mode 100644
index 000000000..ccc441f09
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md
@@ -0,0 +1,88 @@
+# FlashSystem Configuration Replication using Ansible
+
+**Objective:**
+Replication of system configuration from one Flashsystem to another Flashsystem
+
+**Prerequisite:**
+- IBM Storage Virtualize ansible collection version 2.2.0 or above must be installed
+
+**Features:**
+- Set System name
+- Set up NTP server
+- Set up Timezone
+- Set up DNS server
+- Create Ownership groups
+- Create Usergroups
+- Create Users
+
+**Details about files:**
+
+1. src_cluster_vars:
+ This file stores source cluster credentials, defines settable_fields required to extract from source cluster for specified entity, and formats raw extracted data to generate replication_vars file in desired format which will be provided as an input to replicate_config_on_target_cluster.yml playbook.
+ Fields to be set by user:
+ cluster_ip: Cluster IP source system
+ cluster_username: Username of source system
+ cluster_password: Password of source system
+
+2. extract_src_cluster_config.yml:
+ This playbook takes src_cluster_vars file as an input and gathers cluster configuration and writes it onto replication_vars file in a format that can be consumed by replicate_config_on_target_cluster.yml playbook for replication.
+ To run this playbook:
+ ```
+ ansible-playbook extract_src_cluster_config.yml
+ ```
+
+3. target_cluster_vars:
+ This file stores destination cluster credentials, which can be encrypted.
+ Fields to be set by user:
+ cluster_ip: Cluster IP target system
+ cluster_username: Username of target system
+ cluster_password: Password of target system
+ user_default_password: Default password to be set for newly created users
+ >IMPORTANT:
+ user_default_password value should be according to Flashsystem password policy
+ To encrypt cluster_vars file:
+ ```
+ ansible-vault encrypt target_cluster_vars
+ ```
+   This command will prompt you to set a password. Note the password entered at the prompt, as it will be required while running playbooks
+   To open/edit this file later:
+ ```
+ ansible-vault edit target_cluster_vars
+ ```
+
+4. replicate_config_on_target_cluster.yml:
+ Run this playbook after running extract_src_cluster_config.yml playbook. This playbook takes replication_vars file generated by extract_src_cluster_config.yml playbook as an input and configures target system accordingly.
+ Note: If target_cluster_vars file has been encrypted use following command to run replicate_config_on_target_cluster.yml playbook-
+ ```
+ ansible-playbook replicate_config_on_target_cluster.yml --ask-vault-pass
+ ```
+ Enter password used in previous step to encrypt target_cluster_vars file
+ >Note:
+ This playbook sets default password for users created on target system, user will be asked to change this default password on next login.
+ >IMPORTANT:
+ If superuser is also being replicated, use the default password for next login.
+
+**Usage:**
+1. Replicate from one system to another exactly:
+- Set source cluster details in src_cluster_vars
+- Run playbook extract_src_cluster_config.yml
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+2. Replicate from one system to another with few modifications:
+- Set source cluster details in src_cluster_vars
+- Run playbook extract_src_cluster_config.yml
+- Edit config fields to be modified in replication_vars file
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+3. Set-up system by customising config:
+- Set config fields in replication_vars file
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+**Authors:**
+Ajinkya Nanavati (ananava1@in.ibm.com)
+Devendra Mahajan (demahaj1@in.ibm.com)
+Mohit Chitlange (mochitla@in.ibm.com)
+Vrinda Dhakad (vrinda.dhakad@ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml
new file mode 100644
index 000000000..86be57f1b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml
@@ -0,0 +1,97 @@
+- name: Using Storage Virtualize collection to extract source cluster config
+ hosts: localhost
+ vars_files:
+ - vars/src_cluster_vars
+ vars:
+ src_file: "vars/src"
+ dest_file: "vars/replication_vars"
+
+ collections:
+ - ibm.storage_virtualize
+
+ gather_facts: no
+ connection: local
+ tasks:
+
+ - name: Fetch authorization token for source
+ register: svc_token
+ ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Get details of the cluster
+ register: sysinfo
+ ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ svc_token.token }}"
+ gather_subset: [system, dnsserver, ownershipgroup, usergroup, user, emailserver, emailuser]
+ log_path: /tmp/sysinfo.debug
+
+ - name: Define variables
+ set_fact:
+ settable_system_info: {}
+ settable_info_tasks:
+ DnsServer: settable_dns_info
+ Ownershipgroup: settable_ownershipgrp_info
+ UserGrp: settable_usergrp_info
+ User: settable_user_info
+
+ - name: Get settable system parameters from list
+ set_fact:
+ settable_system_info: "{{ settable_system_info | combine({item: sysinfo.System[item]}) }}"
+ loop: "{{settable_fields.System_fields}}"
+ when: sysinfo.System[item] is defined
+
+ - name: Get settable parameters from list
+ set_fact:
+ "{{ item.value }}": "{{ sysinfo[item.key] | json_query(query) }}"
+ vars:
+ query: "[*].{ {% for field in settable_fields[item.key ~ '_fields'] %} {{ field }}: {{ field }}{% if not loop.last %},{% endif %}{% endfor %} }"
+ loop: "{{ settable_info_tasks | dict2items }}"
+ when: sysinfo[item.key] is defined
+
+ - name: Get current timestamp
+ command: "date '+%Y%m%d%H%M%S'"
+ register: timestamp_output
+
+ - name: Add timestamp to the temp file
+ set_fact:
+ src_file: "{{ src_file }}_{{ timestamp_output.stdout }}"
+
+ - name: Create empty file
+ file:
+ path: "{{ src_file }}"
+ state: touch
+ force: yes
+ register: src_creation
+
+ - name: Write content into file
+ lineinfile:
+ path: "{{ src_file }}"
+ line: "System: {{settable_system_info }} \nDnsServer: {{settable_dns_info}} \nOwnershipGroup: {{settable_ownershipgrp_info}} \nUserGrp: {{settable_usergrp_info}} \nUser: {{settable_user_info}}"
+
+ - name: Read file content
+ register: file_data
+ slurp:
+ src: "{{ src_file }}"
+
+ - name: Modify file content
+ set_fact:
+ modified_content: "{{ file_data.content | b64decode }}"
+
+ - name: Search and replace strings
+ loop: "{{ search_replace_pairs }}"
+ set_fact:
+ modified_content: "{{ modified_content | regex_replace(item.search, item.replace) }}"
+
+ - name: Write modified content to destination file
+ copy:
+ content: "{{ modified_content }}"
+ dest: "{{ dest_file }}"
+ force: yes
+
+ - name: Deleting temporary source file
+ file:
+ path: "{{ src_file }}"
+ state: absent
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml
new file mode 100644
index 000000000..3bd5d0fa9
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml
@@ -0,0 +1,71 @@
+- name: Using Storage Virtualize collection to replicate system
+ hosts: localhost
+ vars_files:
+ - vars/replication_vars
+ - vars/target_cluster_vars
+
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+
+ tasks:
+ - name: Fetch authorization token for target cluster
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Initial cluster configuration on FlashSystem
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+
+ - name: Setup NTP server
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ ntpip: "{{ System.cluster_ntp_IP_address if System.cluster_ntp_IP_address is defined }}"
+
+ - name: Setup time zone
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ timezone: "{{ System.time_zone | regex_search('^[^\\s]+') if System.time_zone is defined }}"
+
+ - name: Setup DNS server
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ dnsname: "{{ DnsServer | map (attribute='name') | list }}"
+ dnsip: "{{ DnsServer | map (attribute='IP_address') | list }}"
+
+ - name: Create Ownership group
+ ibm.storage_virtualize.ibm_svc_manage_ownershipgroup:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ loop: "{{OwnershipGroup | default([], true) }}"
+
+ - name: Create Usergroups
+ ibm.storage_virtualize.ibm_svc_manage_usergroup:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ role: "{{item.role }}"
+ ownershipgroup: "{{item.owner_name }}"
+ loop: "{{ UserGrp | default([], true) }}"
+
+ - name: Create Users with password
+ ibm.storage_virtualize.ibm_svc_manage_user:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ usergroup: "{{ item.usergrp_name }}"
+ user_password: "{{ user_default_password }}"
+ forcepasswordchange: true
+ auth_type: usergrp
+ loop: "{{ User | default([], true) }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars
new file mode 100644
index 000000000..92b6eee26
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars
@@ -0,0 +1,52 @@
+# This file is auto-generated by playbook extract_src_cluster_config.yml
+System:
+ time_zone: 200 IST
+ cluster_ntp_IP_address: x.x.x.x # NTP server IP address automatically populated from playbook
+ cluster_isns_IP_address:
+
+DnsServer:
+- name: dnsserver1
+ IP_address: y.y.y.y
+
+- name: dnsserver2
+ IP_address: z.z.z.z
+
+OwnershipGroup:
+- name: ownershipgroup0
+
+- name: ownershipgroup1
+
+UserGrp:
+- name: SecurityAdmin
+ role: SecurityAdmin
+ owner_name:
+
+
+- name: Administrator
+ role: Administrator
+ owner_name:
+
+
+- name: CopyOperator
+ role: CopyOperator
+ owner_name:
+
+
+- name: Service
+ role: Service
+ owner_name:
+
+
+- name: Monitor
+ role: Monitor
+ owner_name:
+
+
+- name: RestrictedAdmin
+ role: RestrictedAdmin
+ owner_name:
+
+User:
+- name: superuser
+ usergrp_name: SecurityAdmin
+
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars
new file mode 100644
index 000000000..2b7c60b64
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars
@@ -0,0 +1,42 @@
+cluster_ip: x.x.x.x # Cluster IP of source system
+cluster_username: username # Username of source system
+cluster_password: password # Password of source system
+
+search_replace_pairs:
+ - search: "{"
+ replace: "\n"
+ - search: "}"
+ replace: "\n"
+ - search: ","
+ replace: "\n "
+ - search: "'"
+ replace: ""
+ - search: "\\["
+ replace: ""
+ - search: "\\]"
+ replace: ""
+ - search: "name"
+ replace: "- name"
+ - search: "_- name"
+ replace: "_name"
+ - search: "time_zone"
+ replace: " time_zone"
+
+settable_fields:
+ System_fields:
+ - time_zone
+ - cluster_ntp_IP_address
+ - cluster_isns_IP_address
+ DnsServer_fields:
+ - name
+ - IP_address
+ Ownershipgroup_fields:
+ - name
+ UserGrp_fields:
+ - name
+ - role
+ - owner_name
+ User_fields:
+ - name
+ - usergrp_name
+
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars
new file mode 100644
index 000000000..e9d6fabab
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars
@@ -0,0 +1,4 @@
+cluster_ip: x.x.x.x # Cluster IP of target system
+cluster_username: username # Username of target system
+cluster_password: password # Password of target system
+user_default_password: new_password # Default password for users created on target system
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml b/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml
new file mode 100644
index 000000000..d1c841a3a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml
@@ -0,0 +1,119 @@
+---
+- name: Using IBM Storage Virtualize collection to create rc consistency group
+ hosts: localhost
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ vars:
+ - auxcluster: x.x.x.x
+ - ausername: ausername
+ - apassword: apassword
+ - clustername: clustername
+ - username: username
+ - password: password
+ - cgname: Group_cg11
+ - remotecluster: Cluster_x.x.x.x
+ - masterpool: site1pool1
+ - mastervol: master
+ - relname: scopy5
+ - auxvol: auxvol
+ connection: local
+ tasks:
+ - name: Fetch authorization token for aux
+ register: auth
+ ibm_svc_auth:
+ clustername: "{{auxcluster}}"
+ username: "{{ausername}}"
+ password: "{{apassword}}"
+ - name: create target volume
+ ibm_svc_manage_volume:
+ clustername: "{{ auxcluster }}"
+ token: "{{auth.token}}"
+ pool: "{{auxpool}}"
+ name: "{{auxvol}}"
+ size: 10
+ unit: "gb"
+ state: present
+ - name: Fetch authorization token for master
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ - name: create remote copy cg
+ ibm_svc_manage_replicationgroup:
+ name: "{{cgname}}"
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ - name: Create source volume
+ ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ pool: "{{masterpool}}"
+ name: "{{mastervol}}"
+ size: 1
+ unit: "gb"
+ state: present
+ - name: create MM remote copy
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: metro
+ sync: true
+ consistgrp: "{{cgname}}"
+ - name: remove the remote copy from CG
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: metro
+ noconsistgrp: true
+ - name: Convert MM to GM
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: global
+ - name: Convert GM to GMCV
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: GMCV
+ consistgrp: "{{cgname}}"
+ - name: Create/attach master change volume
+ ibm_svc_manage_cv:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ rname: "{{relname}}"
+ cvname: "{{ mastervolcv }}"
+ basevolume: "{{ mastervol }}"
+ - name: Create/attach aux change volume
+ ibm_svc_manage_cv:
+ clustername: "{{ auxcluster }}"
+ token: "{{auth.token}}"
+ state: present
+ rname: "{{relname}}"
+ cvname: "{{ auxvolcv }}"
+ basevolume: "{{ auxvol }}"
+ ismaster: false
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml b/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml
new file mode 100644
index 000000000..83a19ccda
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml
@@ -0,0 +1,34 @@
+---
+- name: Using the IBM Storage Virtualize collection
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ hosts: localhost
+ vars:
+ - clustername: x.x.x.x
+ - username: username
+ - password: password
+ - volname: vol0
+ - pool: pool0
+ - easy_tier: "off"
+ - size: 1
+ - unit: gb
+ tasks:
+ - name: Send CLI command over ssh connection
+ ibm_svctask_command:
+ command: [
+ "svctask mkvdisk -name {{ volname }} -mdiskgrp '{{ pool }}' -easytier '{{ easy_tier }}' -size {{ size }} -unit {{ unit }}",
+ "svctask rmvdisk {{ volname }}"
+ ]
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ - name: Send CLI command over ssh connection
+ ibm_svcinfo_command:
+ command: "svcinfo lsvdisk"
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml b/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml
new file mode 100644
index 000000000..1488bcded
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml
@@ -0,0 +1,24 @@
+---
+- name: Using the IBM Storage Virtualize collection
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ hosts: localhost
+ vars:
+ - user: username
+ - clustername: x.x.x.x
+ - username: username
+ - password: password
+ tasks:
+ - name: Run CLI commands
+ register: results
+ ibm_svcinfo_command:
+ command: "svcinfo lssystem"
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/test.debug
+ - name: show time zone in lssystem
+ set_fact:
+ time_zone: "{{ (results['stdout'] | from_json).time_zone }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml b/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml
new file mode 100644
index 000000000..96e78bf2d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml
@@ -0,0 +1,74 @@
+- name: Using Storage Virtualize collection to automate initial setup configuration
+ hosts: localhost
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - address: address
+ - city: city
+ - company_name: company_name
+ - contact_email: contact_email
+ - contact_name: contact_name
+ - country: country
+ - location: location
+ - primary_phonenumber: primary_phonenumber
+ - postal_code: postal_code
+ - province: province
+ - server_ip: x.x.x.x
+ - server_port: xxxx
+ connection: local
+ tasks:
+ - name: Get auth token
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ - name: 1.initial setup config
+ ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ system_name: "{{ system_name }}"
+ dnsname:
+ - dnsserver01
+ dnsip:
+ - 'x.x.x.x'
+ - name: 2.Configure callhome with "email"
+ ibm_svc_manage_callhome:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: "enabled"
+ callhome_type: "email"
+ address: "{{ address}}"
+ city: "{{ city }}"
+ company_name: "{{ company_name }}"
+ contact_email: "{{ contact_email }}"
+ contact_name: "{{ contact_name }}"
+ country: "{{ country }}"
+ location: "{{ location }}"
+ phonenumber_primary: "{{ primary_phonenumber }}"
+ postalcode: "{{ postal_code }}"
+ province: "{{ province }}"
+ serverIP: "{{ server_ip }}"
+ serverPort: "{{ server_port }}"
+ inventory: "on"
+ invemailinterval: 1
+ enhancedcallhome: "on"
+ censorcallhome: "on"
+ - name: 3.Configure SRA
+ ibm_svc_manage_sra:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: enabled
+ name: SRA
+ sra_ip: y.y.y.y
+ sra_port: 22
+ support: remote
+ - name: 4.Complete initial setup
+ ibm_svc_complete_initial_setup:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml b/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml
new file mode 100644
index 000000000..0ab44b4b5
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml
@@ -0,0 +1,47 @@
+---
+- name: Testing the IBM Storage Virtualize collection ibm_svc_vol_map
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - domain: domain
+ - test_vdisk: vdisk_name
+ - pool: pool
+ - test_host: host_name
+ - fcwwpn1: fcwwpn
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Create volume
+ ibm_svc_manage_volume:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{test_vdisk}}"
+ state: present
+ pool: "{{pool}}"
+ size: "1024"
+ unit: "mb"
+ - name: Creating Host
+ ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{test_host}}"
+ state: present
+ fcwwpn: "{{ fcwwpn1 }}"
+ - name: map Host to Vdisk
+ ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ volname: "{{test_vdisk}}"
+ host: "{{test_host}}"
+ state: present
+ scsi: 0
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
new file mode 100644
index 000000000..319ecff2b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
@@ -0,0 +1,28 @@
+Objective:
+This playbook creates FC host, multiple volumes, zones on Flashsystem Cluster and performs mapping of all volumes to host.
+
+Prerequisite:
+- IBM storage Virtualize and Brocade ansible collection plugins must be installed
+- For more information on Brocade switch ansible collection, please refer to https://github.com/brocade/ansible/blob/master/README.rst
+
+This playbook maps multiple volumes of the cluster to an FC host
+- It uses storage virtualize ansible modules as well as brocade ansible modules to create zone
+
+There are total 2 files used for this use-case
+
+1. multiple_vol_creation_zone_map_vars
+ This file has all the variables required for playbooks
+    - cluster_* : Parameters starting with cluster contain cluster details where the user wants to create volumes, hosts, etc.
+ - brocade_switch_* : Parameters starting with brocade_switch contain brocade switch details
+ - application_host_*: Parameters starting with application_host contain application host details which is performing read/write of data
+ - volume_details : Parameters starting with volume contain volume details which will be mapped to host
+ - portset_* : Parameters starting with portset contain portset details required for creating fc host
+
+2. multi_volume_create_host_mapping_zone_multipath
+    - This playbook fetches the list of SCSI_HOST WWPNs associated with the given fcioportid from the specV cluster
+ - Creates zone with the name given and add specV ports fetched and host WWPN's given
+ - Creates multiple volumes based on volume details provided
+ - Maps the multiple volumes to Host to form multiple paths
+
+Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
new file mode 100644
index 000000000..a30d9bf83
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
@@ -0,0 +1,203 @@
+- name: Using Storage Virtualize collection to migrate given volumes
+ hosts: localhost
+ vars_files:
+ - multiple_vol_creation_zone_map_vars
+ collections:
+ - ibm.storage_virtualize
+ - brocade.fos
+ vars:
+ brocade_credentials:
+ fos_ip_addr: "{{ brocade_switch_ip }}"
+ fos_user_name: "{{ brocade_switch_username }}"
+ fos_password: "{{ brocade_switch_password }}"
+ https: False
+ gather_facts: no
+ connection: local
+ tasks:
+
+ - name: Fetch authorization token for source
+ register: specv_token
+ ibm.storage_virtualize.ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Get details of the targetportfc
+ register: fcdetails
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [targetportfc]
+ log_path: /tmp/fcdetails.debug
+
+ - name: get the WWPN list from lstargetportfc for given fc_port_id
+ set_fact:
+ specv_wwpn: "{{ specv_wwpn|default([]) + [item['WWPN']]}}"
+ when: (item.protocol == 'scsi' and item.host_io_permitted == 'yes' and item.fc_io_port_id in cluster_fcioportid)
+ loop: "{{ fcdetails.TargetPortFC }}"
+
+    - name: modify specv wwpn to the format the switch expects
+ set_fact:
+ specv_wwpn_switch_format: "{{ specv_wwpn_switch_format|default([]) +[item|map('join')|join(':')] }}"
+ loop: "{{ (specv_wwpn)|map('batch', 2)|map('list')|list|lower }}"
+
+ - name: get all zoning information from switch
+ brocade.fos.brocade_facts:
+ credential: "{{brocade_credentials}}"
+ vfid: -1
+ gather_subset:
+ - brocade_zoning
+
+ - name: copy the active config in var active_switch_config
+ set_fact:
+ active_switch_config: "{{ ansible_facts.brocade_zoning['effective-configuration'].cfg_name }}"
+
+ - name: Create zones on Brocade switch
+ vars:
+ zone:
+ - name: "{{ application_host_zone_name }}"
+ members: "{{ application_host_wwpns + specv_wwpn_switch_format }}"
+ brocade.fos.brocade_zoning_zone:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ zones: "{{ zone }}"
+ members_add_only: True
+
+ - name: Add zone to active configuration
+ vars:
+ cfgs:
+ - name: "{{ active_switch_config }}"
+ members:
+ - "{{ application_host_zone_name }}"
+ brocade.fos.brocade_zoning_cfg:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ members_add_only: True
+ cfgs: "{{ cfgs }}"
+ active_cfg: "{{ active_switch_config }}"
+
+ - name: create host list for specv without colon format
+ set_fact:
+ application_host_wwpns_specvformat_list: "{{ application_host_wwpns_specvformat_list | default([]) + [(item | replace(':',''))|upper]}}"
+ loop: "{{application_host_wwpns }}"
+
+ - name: create host list for specv without colon format
+ set_fact:
+ application_host_wwpns_specvformat: "{{application_host_wwpns_specvformat |default('')+item +':'}}"
+ loop: "{{application_host_wwpns_specvformat_list| select() }}"
+
+ - set_fact:
+ application_host_wwpns_specvformat: "{{ application_host_wwpns_specvformat[:-1]}}"
+
+ - name: Creating Host on specv
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ host_name }}"
+ state: present
+ fcwwpn: "{{ application_host_wwpns_specvformat }}"
+
+    - name: Create a fc portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ porttype: fc
+ portset_type: host
+ state: present
+
+ - name: Add port ID to the portset
+ ibm.storage_virtualize.ibm_sv_manage_fcportsetmember:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ fcportid: "{{item}}"
+ state: present
+ loop: "{{ cluster_fcioportid }}"
+
+ - name: Create vdisk
+ register: results_cvdisk
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ name: "{{item.vol_name}}"
+ pool: "{{item.mdiskgrp}}"
+ size: "{{item.size}}"
+ unit: "{{item.unit}}"
+ loop: "{{ volume_details }}"
+
+ - name: map Host to Vdisk
+ ibm.storage_virtualize.ibm_svc_vol_map:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ volname: "{{item.vol_name}}"
+ host: "{{host_name}}"
+ loop: "{{ volume_details }}"
+
+ - name: Rescan the paths on the host and run multipath
+ shell: "ssh {{application_host_username}}@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan;sleep 40;"
+
+ - shell: "ssh {{application_host_username}}@{{application_host_ip}} multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+    - name: Get details of the given volume
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [vol]
+ log_path: /tmp/volinfo.debug
+
+ - name: create volume list
+ set_fact:
+ vol_name_list: "{{ vol_name_list|default([])+ [item['vol_name']] }}"
+ loop: "{{ volume_details }}"
+
+ - debug:
+ msg: "{{ vol_name_list }}"
+
+ - name: find vollist data
+ set_fact:
+ vol_list_full_data: "{{ vol_list_full_data|default([])+ [item] }}"
+ vol_name_uid: "{{ vol_name_uid|default([])+[[item['volume_name'],item['vdisk_UID']|lower]]}}"
+ when: (item.volume_name in vol_name_list )
+ loop: "{{ volinfo.Volume }}"
+
+ - debug:
+ msg: "{{ vol_name_uid }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{dm_device| default([]) +[ [item.0] + [item.1] + [item.2]]}}"
+ when: (item.1 in item.2)
+ with_nested:
+ - "{{ vol_name_uid }}"
+ - "{{ multipath_var }}"
+
+ - name: find unmapped volume
+ set_fact:
+ vdisk_mapped_multipath: "{{vdisk_mapped_multipath| default([]) + [item[0]]}}"
+ loop: "{{ dm_device }}"
+
+ - debug:
+ msg: "{{ vdisk_mapped_multipath }}"
+
+ - name: find unmapped volume
+ set_fact:
+ unmaped_vol_name_list: "{{ unmaped_vol_name_list|default([])+ [item] }}"
+ when: (item not in vdisk_mapped_multipath)
+ loop: "{{ vol_name_list }}"
+
+ - debug:
+ msg: "{{ unmaped_vol_name_list }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
new file mode 100644
index 000000000..8a4fcdb18
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
@@ -0,0 +1,30 @@
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username
+application_host_password: password
+application_host_zone_name: test
+application_host_wwpns: ["10:00:00:90:fa:94:20:d0","10:00:00:90:fa:94:20:d2"]
+
+cluster_ip: x.x.x.x
+cluster_username: username1
+cluster_password: password1
+cluster_fcioportid: ['1']
+
+host_name: linux_ansible
+portset_name: portset_ansible
+portset_type: host
+port_type: fc
+brocade_switch_ip: z.z.z.z
+brocade_switch_username: username2
+brocade_switch_password: password2
+
+volume_details:
+ - vol_name: vdisk_3
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
+ - vol_name: vdisk_4
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml b/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml
new file mode 100644
index 000000000..4fe6cdc6c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml
@@ -0,0 +1,27 @@
+- name: Using Storage Virtualize collection to change security settings
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Change max failed login limit
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ maxfailedlogins: 5
+
+ - name: Change SSH protocol level
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ sshprotocol: 2 \ No newline at end of file
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml
new file mode 100644
index 000000000..4fed509b0
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml
@@ -0,0 +1,79 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars:
+ - auxcluster: x.x.x.x
+ - auxusername: auxusername
+ - auxpassword: auxpassword
+ - clustername: clustername
+ - username: username
+ - password: password
+ - cgname: Group_cg11
+ - remote_cluster: Cluster_x.x.x.x
+ - masterpool: site1pool1
+ - mastervol: master
+ - relname: scopy5
+ - auxvol: auxvol
+ - fcwwpn: fcwwpn
+ - size: 1
+ - unit: gb
+ - remote_pool: remote_pool
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for aux
+ register: auth
+ ibm_svc_auth:
+ clustername: "{{ auxcluster }}"
+ username: "{{auxusername}}"
+ password: "{{auxpassword}}"
+ - name: Fetch authorization token for master
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ - name: "create host"
+ ibm_svc_host:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: host_xyz
+ state: present
+ fcwwpn: "{{fcwwpn}}"
+ protocol: scsi
+ - name: "Create source volume source_vol_1 "
+ ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ pool: "{{masterpool}}"
+ name: "source_vol_1"
+ size: "{{size}}"
+ unit: "{{ unit }}"
+ state: present
+ - name: Map Source volume to a host
+ ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ volname: "source_vol_1"
+ host: "host_xyz"
+ state: present
+ - name: Try to initiate a volume migration with replicate_hosts as true when no hosts exist on target system as on source system
+ ibm_svc_manage_migration:
+ source_volume: "source_vol_1"
+ target_volume: "target_vol_1"
+ clustername: "{{ clustername }}"
+ remote_cluster: "{{ remote_cluster }}"
+ token: "{{ results.token }}"
+ state: initiate
+ replicate_hosts: true
+ remote_token: "{{ auth.token }}"
+ relationship_name: "mmapping_1"
+ remote_pool: "{{ remote_pool}}"
+ - name: Switch replication direction of a migration relationship when all hosts are mapped
+ ibm_svc_manage_migration:
+ relationship_name: "mmapping_1"
+ clustername: "{{ clustername}}"
+ token: "{{ results.token }}"
+ state: switch
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt
new file mode 100644
index 000000000..104909118
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt
@@ -0,0 +1,36 @@
+Objective:
+Migrate volume from one Flash System to another Flash System in an application-transparent manner.
+
+Prerequisite:
+- IBM storage Virtualize and Brocade ansible collection plugins must be installed
+- For more information on Brocade switch ansible collection, please refer to https://github.com/brocade/ansible/blob/master/README.rst
+
+These playbooks migrate a volume from a source cluster to the destination cluster.
+ - It uses storage virtualize ansible modules as well as brocade ansible modules to create zone.
+ - These playbooks are designed to migrate volume mapped to same Fibre Channel (FC) host from source cluster to destination cluster.
+
+There are total 3 files used for this use-case.
+ 1. vol_migration_vars:
+ This file has all the variables required for playbooks.
+ - src_cluster_* : Parameters starting with src_cluster contain source cluster details from where user wants to migrate volume
+ - dest_cluster* : Parameters starting with dest_cluster contain destination cluster details to where volume will be migrated
+ - brocade_switch_* : Parameters starting with brocade_switch contain brocade switch details
+ - application_host_*: Parameters starting with application_host contain application host details which is performing read/write of data
+ - volume_details : It consists of volume to be migrated with its source and destination name with host it is attached to
+ 2. initiate_migration_for_given_volume:
+ - This playbook initiates the migration, creates fc host with the same name as source cluster and adds it to the default portset.
+ - Most importantly, it also starts data copy from source cluster to destination cluster
+ Note:
+ User should not run playbook create_zone_map_volume_and_rescan until relationship is in consistent_synchronized state
+ 3. create_zone_map_volume_and_rescan
+ - Execute this playbook once the relationship created by above playbook is in consistent_synchronized state.
+ - This playbook fetches the list of SCSI_HOST WWPN's associated with given fcioportid from specV destination cluster.
+ - Creates zone with the name given and add specV ports fetched and host WWPN's given.
+ - Maps the volume to the Host and starts scsi rescan on the host.
+ - Switch replication direction of a migration relationship once host is mapped.
+ - Again rescan the volume on the host to get the updated path details.
+ - Delete source volume and migration relationship which was created.
+ - Again rescan the volume on the host to get the reduced paths.
+
+ Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml
new file mode 100644
index 000000000..541b509b2
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml
@@ -0,0 +1,33 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars.txt
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Initiate a volume migration with replicate_hosts as true
+ ibm_svc_manage_migration:
+ source_volume: "{{ src_vol_name }}"
+ target_volume: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ remote_cluster: "{{ dest_cluster_name }}"
+ token: "{{ src_token.token }}"
+ state: initiate
+ replicate_hosts: true
+ remote_token: "{{ dest_token.token }}"
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ remote_pool: "{{ dest_cluster_pool_name }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml
new file mode 100644
index 000000000..64c9f9d40
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml
@@ -0,0 +1,147 @@
+- name: Using Storage Virtualize collection to migrate given volume
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars.txt
+ collections:
+ - ibm.storage_virtualize
+ - brocade
+ gather_facts: no
+ vars:
+ brocade_credentials:
+ fos_ip_addr: "{{ brocade_switch_ip }}"
+ fos_user_name: "{{ brocade_switch_username }}"
+ fos_password: "{{ brocade_switch_password }}"
+ https: False
+ dest_vol_name: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ dest_host_name: "{{ host_name }}"
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Get details of the given volume
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [vol]
+ objectname: "{{ dest_vol_name }}"
+ log_path: /tmp/volinfo.debug
+ - name: Get the volume UID data
+ set_fact:
+ vol_uid: "{{ volinfo.Volume[0]['vdisk_UID'] | lower }}"
+ when: volinfo.Volume[0] is defined
+ - name: Get details of the targetportfc
+ register: fcdetails
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [targetportfc]
+ log_path: /tmp/fcdetails.debug
+ - name: get the WWPN list from lstargetportfc for given fc_port_id
+ set_fact:
+ specv_wwpn: "{{ specv_wwpn|default([]) + [item['WWPN']]}}"
+ when: (item.protocol == 'scsi' and item.host_io_permitted == 'yes' and item.fc_io_port_id in dest_cluster_fcioportid)
+ loop: "{{ fcdetails.TargetPortFC }}"
+ - name: Modify SVC WWPNs into the format the switch expects
+ set_fact:
+ specv_wwpn_switch_format: "{{ specv_wwpn_switch_format|default([]) +[item|map('join')|join(':')] }}"
+ loop: "{{ (specv_wwpn)|map('batch', 2)|map('list')|list|lower }}"
+ - name: get all zoning information from switch
+ brocade_facts:
+ credential: "{{brocade_credentials}}"
+ vfid: -1
+ gather_subset:
+ - brocade_zoning
+ - name: copy the active config in var active_switch_config
+ set_fact:
+ active_switch_config: "{{ ansible_facts.brocade_zoning['effective-configuration'].cfg_name }}"
+ - name: Create zones on Brocade switch
+ vars:
+ zone:
+ - name: "{{ application_host_zone_name }}"
+ members: "{{ application_host_wwpns + specv_wwpn_switch_format }}"
+ brocade.fos.brocade_zoning_zone:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ zones: "{{ zone }}"
+ members_add_only: True
+ - name: Add zone to active configuration
+ vars:
+ cfgs:
+ - name: "{{ active_switch_config }}"
+ members:
+ - "{{ application_host_zone_name }}"
+ brocade_zoning_cfg:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ members_add_only: True
+ cfgs: "{{ cfgs }}"
+ active_cfg: "{{ active_switch_config }}"
+ - name: map Vol to host
+ ibm_svc_vol_map:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ state: present
+ volname: "{{ dest_vol_name }}"
+ host: "{{ dest_host_name }}"
+ scsi: 0
+ - name: Rescan the paths on the host and run multipath
+ shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan;sleep 40;"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - name: Switch replication direction of a migration relationship when host is mapped
+ ibm_svc_manage_migration:
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ token: "{{ src_token.token }}"
+ state: switch
+ - name: Rescan the scsi bus devices on the host
+ ansible.builtin.shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - name: Delete source volume and migration relationship
+ ibm_svc_manage_migration:
+ clustername: "{{ src_cluster_ip }}"
+ state: cleanup
+ source_volume: "{{ src_vol_name}}"
+ token: "{{ src_token.token }}"
+ log_path: /tmp/ansible.log
+ - shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - debug:
+ msg: "{{ dm_device }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt
new file mode 100644
index 000000000..1d9f399e8
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt
@@ -0,0 +1,28 @@
+src_cluster_name: Master
+src_cluster_ip: x.x.x.x
+src_cluster_username: username
+src_cluster_password: password
+
+dest_cluster_name: Aux_far
+dest_cluster_ip: y.y.y.y
+dest_cluster_username: username1
+dest_cluster_password: password1
+dest_cluster_pool_name: Pool0
+dest_cluster_fcioportid: ['1']
+
+brocade_switch_ip: z.z.z.z
+brocade_switch_username: username2
+brocade_switch_password: password2
+
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username4
+application_host_password: password4
+application_host_wwpns: ["10:00:00:90:fa:94:20:d0","10:00:00:90:fa:94:20:d2"]
+application_host_zone_name: test
+
+src_vol_name: vdisk_application1
+host_name: linux_host
+dest_vol_name: vdisk_application1
+rel_name: r1
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
new file mode 100644
index 000000000..a69bd5c75
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
@@ -0,0 +1,45 @@
+Objective:
+Migrate volume from one Flash System to another Flash System in an application-transparent manner with target host as iSCSI.
+
+Prerequisite:
+- IBM Storage Virtualize ansible collection plugins must be installed
+
+These playbooks migrate a volume from a source cluster to the destination cluster.
+These playbooks are designed to migrate volume mapped to Fibre Channel (FC) host or ISCSI host from source cluster to ISCSI host on destination cluster.
+
+There are total 3 files used for this use-case.
+ 1. vol_migration_vars:
+ This file has all the variables required for playbooks
+ - src_cluster_* : Parameters starting with src_cluster contain source cluster details from where user wants to migrate volume
+ - src_cluster_* : Parameters starting with src_cluster contain source cluster details from where user wants to migrate volume
+ - dest_cluster* : Parameters starting with dest_cluster contain destination cluster details to where volume will be migrated
+ - application_host_* : Parameters starting with application_host contain application host details which is performing read/write of data
+ - application_iscsi_ip : This contains detailed information for the IP to be assigned to each node, as follows
+ - node_name: Node name of cluster
+ - portset: portset name to be used
+ - ip_address: <ip address>
+ - subnet_prefix: <prefix>
+ - gateway: <gateway>
+ - port: <port_id>
+ - src_vol_name : This suggest volume name of source cluster which is to be migrated
+ - dest_vol_name : This create volume name at destination cluster
+ - rel_name : This is name of relationship to be created between source and destination cluster
+ 2. initiate_migration_for_given_volume:
+ - This playbook initiates the migration
+ - Most importantly, it also starts data copy from source cluster to destination cluster
+ Note:
+ User should not run playbook create_zone_map_volume_and_rescan until relationship is in consistent_synchronized state
+ 3. create_host_map_volume_and_rescan
+ - Execute this playbook once the relationship created by above playbook is in consistent_synchronized state
+ - create iscsi host on flashsystem from iqn defined in variable application_host_iqn from variable file
+ - configuring ip on each node for iscsi host connectivity
+ - establish iscsi session from host to flashsystem nodes
+ - Maps the volume to the Host and starts scsi rescan on the host
+ - Switch replication direction of a migration relationship once host is mapped
+ - Again rescan the volume on the host to get the updated path details
+ - Delete source volume and migration relationship which was created
+ - Again rescan the multipath and expect the migrated volume has only the path from the destination cluster
+
+ Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
+ Devendra Mahajan (demahaj1@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
new file mode 100644
index 000000000..2862f73e1
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
@@ -0,0 +1,143 @@
+- name: Using Storage Virtualize collection to migrate given volume
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.storage_virtualize
+
+ gather_facts: no
+ vars:
+ dest_vol_name: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ dest_host_name: "{{ host_name }}"
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+
+ - name: Get details of the given volume
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [vol]
+ objectname: "{{ dest_vol_name }}"
+ log_path: /tmp/volinfo.debug
+
+ - name: Get the volume UID data
+ set_fact:
+ vol_uid: "{{ volinfo.Volume[0]['vdisk_UID'] | lower }}"
+ when: volinfo.Volume[0] is defined
+
+ - name: Creating Host on SVC
+ ibm_svc_host:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ name: "{{ dest_host_name }}"
+ state: present
+ iscsiname: "{{ application_host_iqn }}"
+
+ - name: map Vdisk to host
+ ibm_svc_vol_map:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ state: present
+ volname: "{{ dest_vol_name }}"
+ host: "{{ dest_host_name }}"
+ scsi: 0
+
+ - name: Create IP provisioning
+ ibm.storage_virtualize.ibm_svc_manage_ip:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ log_path: /tmp/playbook.debug
+ node: "{{ item.node_name }}"
+ port: "{{ item.port }}"
+ portset: "{{ item.portset }}"
+ ip_address: "{{ item.ip_address }}"
+ subnet_prefix: "{{ item.subnet_prefix }}"
+ gateway: "{{ item.gateway }}"
+ state: present
+ loop: "{{ application_iscsi_ip }}"
+
+ - name: Create iscsi session
+ shell: ssh {{ application_host_username }}@{{ application_host_ip }} "iscsiadm --mode discovery --type sendtargets --portal {{item.ip_address}} -l"
+ loop: "{{ application_iscsi_ip }}"
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
+
+ - name: Switch replication direction of a migration relationship
+ ibm_svc_manage_migration:
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ token: "{{ src_token.token }}"
+ state: switch
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device }}"
+
+ - name: Delete source volume and migration relationship
+ ibm_svc_manage_migration:
+ clustername: "{{ src_cluster_ip }}"
+ state: cleanup
+ source_volume: "{{ src_vol_name }}"
+ token: "{{ src_token.token }}"
+ log_path: /tmp/ansible.log
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
new file mode 100644
index 000000000..0df875c4e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
@@ -0,0 +1,33 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Initiate a volume migration with replicate_hosts as false
+ ibm_svc_manage_migration:
+ source_volume: "{{ src_vol_name }}"
+ target_volume: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ remote_cluster: "{{ dest_cluster_name }}"
+ token: "{{ src_token.token }}"
+ state: initiate
+ replicate_hosts: false
+ remote_token: "{{ dest_token.token }}"
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ remote_pool: "{{ dest_cluster_pool_name }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
new file mode 100644
index 000000000..a7e18d7df
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
@@ -0,0 +1,36 @@
+src_cluster_name: Master
+src_cluster_ip: x.x.x.x
+src_cluster_username: username
+src_cluster_password: password
+
+dest_cluster_name: Aux_far
+dest_cluster_ip: y.y.y.y
+dest_cluster_username: username1
+dest_cluster_password: password1
+dest_cluster_pool_name: mdiskgrp0
+
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username2
+application_host_password: password2
+application_host_iqn: "iqn.1994-05.com.redhat:5e54d1815f55"
+
+application_iscsi_ip:
+ - node_name: node1
+ portset: portset0
+ ip_address: 192.168.100.121
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+ - node_name: node2
+ portset: portset0
+ ip_address: 192.168.100.122
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+
+src_vol_name: vdisk_application1
+host_name: linux_host
+dest_vol_name: vdisk_application1
+rel_name: r1
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml
new file mode 100644
index 000000000..6bf9bc7f6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml
@@ -0,0 +1,29 @@
+- name: Using Storage Virtualize collection to create a volume group
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - domain: domain
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Create a new volume group
+ ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: vg1
+ state: present
+ - name: Create volumegroup with existing snapshotpolicy
+ ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: vg2
+ state: present
+ snapshotpolicy: snapshotpolicy2