Diffstat (limited to 'ansible_collections/infinidat/infinibox/plugins/modules')
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/__init__.py              |   0
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py        | 298
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py         | 277
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py  | 269
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py             | 250
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py           | 192
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py            | 655
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py  | 424
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py           | 290
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py           | 403
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py           | 352
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py            | 619
12 files changed, 4029 insertions, 0 deletions
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py b/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
new file mode 100644
index 00000000..fe682cf3
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_cluster
+version_added: '2.9.0'
+short_description: Create, Delete and Modify Host Cluster on Infinibox
+description:
+ - This module creates, deletes or modifies host clusters on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Cluster Name
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies Cluster when present, removes when absent, or provides
+ details of a cluster when stat.
+ required: false
+ type: str
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ cluster_hosts:
+ description: A list of hosts to add to a cluster when state is present.
+ required: false
+ type: list
+ elements: dict
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new cluster
+ infini_cluster:
+ name: foo_cluster
+ user: admin
+ password: secret
+ system: ibox001
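+
+# The option check in this module requires cluster_hosts when state is present
+# (the default). A minimal sketch of such a task follows; the host names are
+# illustrative and assumed to already exist on the Infinibox.
+- name: Create new cluster with hosts
+  infini_cluster:
+    name: foo_cluster
+    cluster_hosts:
+      - host_name: host1.example.com
+        host_cluster_state: present
+      - host_name: host2.example.com
+        host_cluster_state: absent
+    user: admin
+    password: secret
+    system: ibox001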
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+try:
+ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ INFINISDK_IMPORT_ERROR,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_cluster,
+ unixMillisecondsToDate,
+ merge_two_dicts,
+ )
+except ModuleNotFoundError:
+ from infinibox import ( # Used when hacking
+ HAS_INFINISDK,
+ INFINISDK_IMPORT_ERROR,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_cluster,
+ unixMillisecondsToDate,
+ merge_two_dicts,
+ )
+
+try:
+ from infi.dtypes.iqn import make_iscsi_name
+ HAS_INFI_MOD = True
+except ImportError:
+ HAS_INFI_MOD = False
+
+
+@api_wrapper
+def get_host_by_name(system, host_name):
+ """Find a host by the name specified in the module"""
+ host = None
+
+ for a_host in system.hosts.to_list():
+ a_host_name = a_host.get_name()
+ if a_host_name == host_name:
+ host = a_host
+ break
+ return host
+
+
+@api_wrapper
+def create_cluster(module, system):
+ # print("create cluster")
+ changed = True
+ if not module.check_mode:
+ cluster = system.host_clusters.create(name=module.params['name'])
+ cluster_hosts = module.params['cluster_hosts']
+ for cluster_host in cluster_hosts:
+ if cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, cluster_host['host_name'])
+ cluster.add_host(host)
+ # print("Added host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ # else:
+ # print("Skipped adding (absent) host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ return changed
+
+
+@api_wrapper
+def update_cluster(module, system, cluster):
+ # print("update cluster")
+ changed = False
+
+ # e.g. of one host dict found in the module.params['cluster_hosts'] list:
+ # {host_name: <'some_name'>, host_cluster_state: <'present' or 'absent'>}
+ module_cluster_hosts = module.params['cluster_hosts']
+ current_cluster_hosts_names = [host.get_name() for host in cluster.get_field('hosts')]
+ # print("current_cluster_hosts_names:", current_cluster_hosts_names)
+ for module_cluster_host in module_cluster_hosts:
+ module_cluster_host_name = module_cluster_host['host_name']
+ # print("module_cluster_host_name:", module_cluster_host_name)
+ # Need to add host to cluster?
+ if module_cluster_host_name not in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = 'Cannot find host {0} to add to cluster {1}'.format(
+ module_cluster_host_name,
+ cluster.get_name(),
+ )
+ module.fail_json(msg=msg)
+ cluster.add_host(host)
+ # print("Added host {0} to cluster {1}".format(host.get_name(), cluster.get_name()))
+ changed = True
+ # Need to remove host from cluster?
+ elif module_cluster_host_name in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'absent':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+                    msg = 'Cannot find host {0} to remove from cluster {1}'.format(
+ module_cluster_host_name,
+ cluster.get_name(),
+ )
+ module.fail_json(msg=msg)
+ cluster.remove_host(host)
+ # print("Removed host {0} from cluster {1}".format(host.get_name(), cluster.get_name()))
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_cluster(module, cluster):
+ if not cluster:
+        msg = "Cluster {0} not found".format(module.params['name'])
+ module.fail_json(msg=msg)
+ changed = True
+ if not module.check_mode:
+ cluster.delete()
+ return changed
+
+
+def get_sys_cluster(module):
+ system = get_system(module)
+ cluster = get_cluster(module, system)
+ return (system, cluster)
+
+
+def get_cluster_fields(cluster):
+ fields = cluster.get_fields(from_cache=True, raw_value=True)
+ created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
+ field_dict = dict(
+ hosts=[],
+ id=cluster.id,
+ created_at=created_at,
+ created_at_timezone=created_at_timezone,
+ )
+ hosts = cluster.get_hosts()
+ for host in hosts:
+ host_dict = {
+ 'host_id': host.id,
+ 'host_name': host.get_name(),
+ }
+ field_dict['hosts'].append(host_dict)
+ return field_dict
+
+
+def handle_stat(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ module.fail_json(msg='Cluster {0} not found'.format(cluster_name))
+ field_dict = get_cluster_fields(cluster)
+ result = dict(
+ changed=False,
+ msg='Cluster stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ changed = create_cluster(module, system)
+ msg = 'Cluster {0} created'.format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = update_cluster(module, system, cluster)
+ if changed:
+ msg = 'Cluster {0} updated'.format(cluster_name)
+ else:
+ msg = 'Cluster {0} required no changes'.format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ changed = False
+ msg = "Cluster {0} already absent".format(cluster_name)
+ else:
+ changed = delete_cluster(module, cluster)
+ msg = "Cluster {0} removed".format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ state = module.params['state']
+ if state == 'present':
+ if module.params['cluster_hosts'] is None:
+ module.fail_json(msg='Option cluster_hosts, a list, must be provided')
+
+ cluster_hosts = module.params['cluster_hosts']
+ for host in cluster_hosts:
+ try:
+ # Check host has required keys
+ valid_keys = ['host_name', 'host_cluster_state']
+ for valid_key in valid_keys:
+ not_used = host[valid_key]
+ # Check host has no unknown keys
+ if len(host.keys()) != len(valid_keys):
+ raise KeyError
+ except KeyError:
+ msg = 'With state present, all cluster_hosts ' \
+ + 'require host_name and host_cluster_state key:values ' \
+ + 'and no others'
+ module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ cluster_hosts=dict(required=False, type="list", elements="dict"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFI_MOD:
+ module.fail_json(msg=missing_required_lib('infi.dtypes.iqn'))
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
new file mode 100644
index 00000000..f83e9b1f
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat(info@infinidat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_export
+version_added: '2.3.0'
+short_description: Create, Delete or Modify NFS Exports on Infinibox
+description:
+ - This module creates, deletes or modifies NFS exports on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Export name. Must start with a forward slash, e.g. name=/data.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies export when present, removes when absent, or provides
+ export details with stat.
+ required: false
+ default: "present"
+ choices: [ "stat", "present", "absent" ]
+ type: str
+ client_list:
+ description:
+ - List of dictionaries with client entries. See examples.
+ Check infini_export_client module to modify individual NFS client entries for export.
+ required: false
+ type: list
+ elements: dict
+ filesystem:
+ description:
+ - Name of exported file system.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - munch
+'''
+
+EXAMPLES = r'''
+- name: Export bar filesystem under foo pool as /data
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Get status of export bar filesystem under foo pool as /data
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Remove export bar filesystem under foo pool as /data
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Export and specify client list explicitly
+ infini_export:
+ name: /data02
+ filesystem: foo
+ client_list:
+ - client: 192.168.0.2
+ access: RW
+ no_root_squash: True
+ - client: 192.168.0.100
+ access: RO
+ no_root_squash: False
+ - client: 192.168.0.10-192.168.0.20
+ access: RO
+ no_root_squash: False
+ system: ibox001
+ user: admin
+ password: secret
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_filesystem,
+ get_export,
+ merge_two_dicts,
+)
+
+MUNCH_IMPORT_ERROR = None
+try:
+ from munch import unmunchify
+ HAS_MUNCH = True
+except ImportError:
+ HAS_MUNCH = False
+ MUNCH_IMPORT_ERROR = traceback.format_exc()
+
+
+def transform(d):
+    """Turn a permissions dict into a frozenset of its items so client lists can be compared order-insensitively."""
+    return frozenset(d.items())
+
+
+@api_wrapper
+def create_export(module, export, filesystem, system):
+    """ Create a new export """
+ if export:
+ raise AssertionError("Export {0} already exists".format(export.get_name()))
+ changed = False
+
+ name = module.params['name']
+ client_list = module.params['client_list']
+
+ if not module.check_mode:
+ export = system.exports.create(export_path=name, filesystem=filesystem)
+ if client_list:
+ export.update_permissions(client_list)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_export(module, export, filesystem, system):
+ """ Create new filesystem or update existing one"""
+ if not export:
+ raise AssertionError("Export {0} does not exist and cannot be updated".format(export.get_name()))
+
+ changed = False
+
+ name = module.params['name']
+ client_list = module.params['client_list']
+
+ if client_list:
+ # msg = "client_list: {0}, type: {1}".format(client_list, type(client_list))
+ # module.fail_json(msg=msg)
+ if set(map(transform, unmunchify(export.get_permissions()))) \
+ != set(map(transform, client_list)):
+ if not module.check_mode:
+ export.update_permissions(client_list)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_export(module, export):
+ """ Delete file system"""
+ if not module.check_mode:
+ export.delete()
+ changed = True
+ return changed
+
+
+def get_sys_exp_fs(module):
+ system = get_system(module)
+ filesystem = get_filesystem(module, system)
+ export = get_export(module, system)
+ return (system, export, filesystem)
+
+
+def get_export_fields(export):
+ fields = export.get_fields() # from_cache=True, raw_value=True)
+ export_id = fields.get('id', None)
+ permissions = fields.get('permissions', None)
+ enabled = fields.get('enabled', None)
+ field_dict = dict(
+ id=export_id,
+ permissions=permissions,
+ enabled=enabled,
+ )
+ return field_dict
+
+
+def handle_stat(module):
+ """
+ Gather stats on export and return. Changed is always False.
+ """
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not export:
+ module.fail_json(msg='Export "{0}" of file system "{1}" not found'.format(
+ module.params['name'],
+ module.params['filesystem'],
+ ))
+
+ field_dict = get_export_fields(export)
+ result = dict(
+ changed=False,
+        msg='Export stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not filesystem:
+ module.fail_json(msg='File system {0} not found'.format(module.params['filesystem']))
+ elif not export:
+ changed = create_export(module, export, filesystem, system)
+ module.exit_json(changed=changed, msg="File system export created")
+ else:
+ changed = update_export(module, export, filesystem, system)
+ module.exit_json(changed=changed, msg="File system export updated")
+
+
+def handle_absent(module):
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not export:
+ changed = False
+ msg = "Export of {0} already absent".format(module.params['filesystem'])
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = delete_export(module, export)
+ msg = "Export of {0} deleted".format(module.params['filesystem'])
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ filesystem=dict(required=True),
+ client_list=dict(type='list', elements='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_MUNCH:
+        module.fail_json(msg=missing_required_lib('munch'),
+                         exception=MUNCH_IMPORT_ERROR)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
new file mode 100644
index 00000000..d3570578
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_export_client
+version_added: '2.3.0'
+short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
+description:
+  - This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ client:
+ description:
+      - Client IP or Range. Ranges can be defined as follows:
+        192.168.0.1-192.168.0.254.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies client when present and removes when absent.
+ required: false
+ default: "present"
+ choices: [ "stat", "present", "absent" ]
+ type: str
+ access_mode:
+ description:
+ - Read Write or Read Only Access.
+ choices: [ "RW", "RO" ]
+ default: "RW"
+ required: false
+ type: str
+ no_root_squash:
+ description:
+ - Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
+ type: bool
+ default: no
+ required: false
+ export:
+ description:
+ - Name of the export.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - munch
+'''
+
+EXAMPLES = r'''
+- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
+ infini_export_client:
+ client: 10.0.0.1
+ access_mode: RW
+ no_root_squash: yes
+ export: /data
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Add multiple clients with RO access. Squash root privileges
+ infini_export_client:
+ client: "{{ item }}"
+ access_mode: RO
+ no_root_squash: no
+ export: /data
+ user: admin
+ password: secret
+ system: ibox001
+ with_items:
+ - 10.0.0.2
+ - 10.0.0.3
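+
+# A minimal sketch of removing a client entry, assuming the /data export and
+# client addresses from the examples above.
+- name: Remove nfs client 10.0.0.1 from the export
+  infini_export_client:
+    client: 10.0.0.1
+    export: /data
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001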
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_export,
+ merge_two_dicts,
+)
+
+MUNCH_IMPORT_ERROR = None
+try:
+ from munch import Munch, unmunchify
+ HAS_MUNCH = True
+except ImportError:
+ MUNCH_IMPORT_ERROR = traceback.format_exc()
+ HAS_MUNCH = False
+
+
+@api_wrapper
+def update_client(module, export):
+ """
+ Update export client list. Note that this will replace existing clients.
+ """
+
+ changed = False
+
+ client = module.params['client']
+ access_mode = module.params['access_mode']
+ no_root_squash = module.params['no_root_squash']
+
+ client_list = export.get_permissions()
+ client_not_in_list = True
+
+ for item in client_list:
+ if item.client == client: # Update client
+ client_not_in_list = False
+ if item.access != access_mode:
+ item.access = access_mode
+ changed = True
+ if item.no_root_squash is not no_root_squash:
+ item.no_root_squash = no_root_squash
+ changed = True
+
+ # If access_mode and/or no_root_squash not passed as arguments to the module,
+ # use access_mode with RW value and set no_root_squash to False
+ if client_not_in_list: # Create client
+ changed = True
+ client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
+
+ if changed:
+ for index, item in enumerate(client_list):
+ client_list[index] = unmunchify(item)
+ if not module.check_mode:
+ export.update_permissions(client_list)
+
+ return changed
+
+
+@api_wrapper
+def delete_client(module, export):
+ """Update export client list"""
+ if export is None and module.params['state'] == 'absent':
+ module.exit_json(changed=False)
+
+ changed = False
+
+ client = module.params['client']
+ client_list = export.get_permissions()
+
+ for index, item in enumerate(client_list):
+ if item.client == client:
+ changed = True
+ del client_list[index]
+
+ if changed:
+ for index, item in enumerate(client_list):
+ client_list[index] = unmunchify(item)
+ if not module.check_mode:
+ export.update_permissions(client_list)
+
+ return changed
+
+
+def get_sys_exp(module):
+ system = get_system(module)
+ export = get_export(module, system)
+ return (system, export)
+
+
+def get_export_client_fields(export, client_name):
+ fields = export.get_fields() # from_cache=True, raw_value=True)
+ permissions = fields.get('permissions', None)
+ for munched_perm in permissions:
+ perm = unmunchify(munched_perm)
+ if perm['client'] == client_name: # Found client
+ field_dict = dict(
+ access_mode=perm['access'],
+ no_root_squash=perm['no_root_squash'],
+ )
+ return field_dict
+    raise AssertionError("Client {0} not found in export permissions".format(client_name))
+
+
+def handle_stat(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ module.fail_json(msg='Export {0} not found'.format(module.params['export']))
+ client_name = module.params['client']
+ field_dict = get_export_client_fields(export, client_name)
+ result = dict(
+ changed=False,
+ msg='Export client stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ msg = 'Export {0} not found'.format(module.params['export'])
+ module.fail_json(msg=msg)
+
+ changed = update_client(module, export)
+ msg = "Export client updated"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ changed = False
+ msg = "Export client already absent"
+ module.exit_json(changed=False, msg=msg)
+ else:
+ changed = delete_client(module, export)
+ msg = "Export client removed"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ client=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ access_mode=dict(choices=['RO', 'RW'], default='RW', type="str"),
+ no_root_squash=dict(type='bool', default='no'),
+ export=dict(required=True)
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_MUNCH:
+ module.fail_json(msg=missing_required_lib('munch'),
+ exception=MUNCH_IMPORT_ERROR)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
new file mode 100644
index 00000000..f9cd2bd5
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_fs
+version_added: '2.3.0'
+short_description: Create, Delete or Modify filesystems on Infinibox
+description:
+ - This module creates, deletes or modifies filesystems on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - File system name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies file system when present or removes when absent.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ type: str
+ thin_provision:
+ description:
+ - Whether the master file system should be thin or thick provisioned.
+ required: false
+ default: true
+ type: bool
+ pool:
+ description:
+ - Pool that will host file system.
+ required: true
+ type: str
+ size:
+ description:
+      - File system size in MB, GB, TB or PB units. See examples.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Create new file system named foo under pool named bar
+ infini_fs:
+ name: foo
+ size: 1TB
+ pool: bar
+ thin_provision: true
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
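+
+# A minimal sketch of querying the same file system. With state stat the module
+# reports size, used size, id and provisioning without making changes.
+- name: Stat file system named foo under pool named bar
+  infini_fs:
+    name: foo
+    pool: bar
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001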
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+try:
+ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_pool,
+ get_system,
+ get_filesystem
+ )
+except ModuleNotFoundError:
+ from infinibox import ( # Used when hacking
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_pool,
+ get_system,
+ get_filesystem
+ )
+
+CAPACITY_IMP_ERR = None
+try:
+ from capacity import KiB, Capacity
+ HAS_CAPACITY = True
+except ImportError:
+ HAS_CAPACITY = False
+
+
+@api_wrapper
+def create_filesystem(module, system):
+ """Create Filesystem"""
+ changed = True
+ if not module.check_mode:
+ if module.params['thin_provision']:
+ provisioning = 'THIN'
+ else:
+ provisioning = 'THICK'
+ filesystem = system.filesystems.create(
+ name=module.params['name'],
+ pool=get_pool(module, system),
+ provtype=provisioning,
+ )
+
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ filesystem.update_size(size)
+ return changed
+
+
+@api_wrapper
+def update_filesystem(module, filesystem):
+ """Update Filesystem"""
+ changed = False
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ if filesystem.get_size() != size:
+ if not module.check_mode:
+ filesystem.update_size(size)
+ changed = True
+
+ provisioning = str(filesystem.get_provisioning())
+ if provisioning == 'THICK' and module.params['thin_provision']:
+ if not module.check_mode:
+ filesystem.update_provisioning('THIN')
+ changed = True
+ if provisioning == 'THIN' and not module.params['thin_provision']:
+ if not module.check_mode:
+ filesystem.update_provisioning('THICK')
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_filesystem(module, filesystem):
+    """ Delete Filesystem"""
+    changed = True
+    if not module.check_mode:
+        filesystem.delete()
+    return changed
+
+
+def get_sys_pool_fs(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ filesystem = get_filesystem(module, system)
+ return (system, pool, filesystem)
+
+
+def handle_stat(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ if not filesystem:
+ module.fail_json(msg='File system {0} not found'.format(module.params['name']))
+ fields = filesystem.get_fields() # from_cache=True, raw_value=True)
+ name = fields.get("name", None)
+ used = fields.get('used_size', None)
+ filesystem_id = fields.get('id', None)
+ provisioning = fields.get('provisioning', None)
+
+ result = dict(
+ changed=False,
+ name=name,
+ size=str(filesystem.get_size()),
+ used=str(used),
+ filesystem_id=filesystem_id,
+ provisioning=provisioning,
+ msg='File system stat found'
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ if not filesystem:
+ changed = create_filesystem(module, system)
+ module.exit_json(changed=changed, msg="File system created")
+ else:
+ changed = update_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system updated")
+
+
+def handle_absent(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool or not filesystem:
+ module.exit_json(changed=False, msg="File system already absent")
+ else:
+ changed = delete_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+            thin_provision=dict(type="bool", default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib('capacity'))
+
+ if module.params['size']:
+ try:
+ Capacity(module.params['size'])
+ except Exception:
+ module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
new file mode 100644
index 00000000..68d78546
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_host
+version_added: '2.3.0'
+short_description: Create, Delete or Modify Hosts on Infinibox
+description:
+ - This module creates, deletes or modifies hosts on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+  name:
+    description:
+      - Host Name
+    required: true
+    type: str
+  state:
+    description:
+      - Creates/Modifies Host when present or removes when absent
+    required: false
+    default: present
+    choices: [ "stat", "present", "absent" ]
+    type: str
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new host
+ infini_host:
+ name: foo.example.com
+ user: admin
+ password: secret
+ system: ibox001
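+
+# A minimal sketch of removing the same host. Deletion may fail with an API
+# error if volumes are still mapped to the host.
+- name: Remove host
+  infini_host:
+    name: foo.example.com
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001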
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from infi.dtypes.iqn import make_iscsi_name
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_host,
+ unixMillisecondsToDate,
+ merge_two_dicts,
+)
+
+
+@api_wrapper
+def create_host(module, system):
+
+ changed = True
+
+ if not module.check_mode:
+ host = system.hosts.create(name=module.params['name'])
+ return changed
+
+
+@api_wrapper
+def update_host(module, host):
+ changed = False
+ return changed
+
+
+@api_wrapper
+def delete_host(module, host):
+ changed = True
+ if not module.check_mode:
+ # May raise APICommandFailed if mapped, etc.
+ host.delete()
+ return changed
+
+
+def get_sys_host(module):
+ system = get_system(module)
+ host = get_host(module, system)
+ return (system, host)
+
+
+def get_host_fields(host):
+ fields = host.get_fields(from_cache=True, raw_value=True)
+ created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
+ field_dict = dict(
+ created_at=created_at,
+ created_at_timezone=created_at_timezone,
+ id=host.id,
+ iqns=[],
+ luns=[],
+ ports=[],
+ wwns=[],
+ )
+ luns = host.get_luns()
+ for lun in luns:
+ field_dict['luns'].append({'lun_id': lun.id,
+ 'lun_volume_id': lun.volume.id,
+ 'lun_volume_name': lun.volume.get_name(),
+ })
+ ports = host.get_ports()
+ for port in ports:
+ if str(type(port)) == "<class 'infi.dtypes.wwn.WWN'>":
+ field_dict['wwns'].append(str(port))
+ if str(type(port)) == "<class 'infi.dtypes.iqn.IQN'>":
+ field_dict['iqns'].append(str(port))
+ return field_dict
+
+
+def handle_stat(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ module.fail_json(msg='Host {0} not found'.format(host_name))
+ field_dict = get_host_fields(host)
+ result = dict(
+ changed=False,
+ msg='Host stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ changed = create_host(module, system)
+ msg = 'Host {0} created'.format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = update_host(module, host)
+ msg = 'Host {0} updated'.format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ msg = "Host {0} already absent".format(host_name)
+ module.exit_json(changed=False, msg=msg)
+ else:
+ changed = delete_host(module, host)
+ msg = "Host {0} removed".format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
new file mode 100644
index 00000000..e3757e02
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
@@ -0,0 +1,655 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_map
+version_added: '2.9.0'
+short_description: Create and Delete mapping of a volume to a host or cluster on Infinibox
+description:
+ - This module creates or deletes mappings of volumes to hosts or clusters
+ on Infinibox.
+ - For Linux hosts, after calling this module, the playbook should execute "rescan-scsi-bus.sh" on the host when creating mappings.
+ - When removing mappings "rescan-scsi-bus.sh --remove" should be called.
+ - For Windows hosts, consider using "'rescan' | diskpart" or "Update-HostStorageCache".
+author: David Ohlemacher (@ohlemacher)
+options:
+  host:
+    description:
+      - Host Name
+    required: false
+    type: str
+  cluster:
+    description:
+      - Cluster Name
+    required: false
+    type: str
+  state:
+    description:
+      - Creates mapping when present or removes when absent, or provides
+        details of a mapping when stat.
+    required: false
+    default: present
+    choices: [ "stat", "present", "absent" ]
+    type: str
+  volume:
+    description:
+      - Volume name to map to the host or cluster.
+    required: true
+    type: str
+  lun:
+    description:
+      - Volume LUN to use for the mapping.
+    required: false
+    type: int
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Map a volume to an existing host
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
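+
+# The description above recommends rescanning the SCSI bus on Linux hosts after
+# creating a mapping. A minimal sketch of such a follow-up task, assuming
+# rescan-scsi-bus.sh is installed on the target host:
+- name: Rescan SCSI bus on the Linux host after mapping
+  ansible.builtin.command: rescan-scsi-bus.sh
+  become: true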
+
+- name: Map a volume to an existing cluster
+ infini_map:
+ cluster: test-cluster
+ volume: bar
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Unmap volume bar from host foo.example.com
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: absent
+ system: ibox01
+ user: admin
+ password: secret
+
+- name: Stat mapping of volume bar to host foo.example.com
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: stat
+ system: ibox01
+ user: admin
+ password: secret
+'''
+
+
+# RETURN = r''' # '''
+
+import traceback
+# import sh
+
+# rescan_scsi = sh.Command("rescan-scsi-bus.sh")
+# rescan_scsi_remove = rescan_scsi.bake("--remove")
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ get_cluster,
+ get_host,
+ get_pool,
+ get_system,
+ get_volume,
+ infinibox_argument_spec,
+ merge_two_dicts
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed, ObjectNotFound
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+def vol_is_mapped_to_host(volume, host):
+ volume_fields = volume.get_fields()
+ volume_id = volume_fields.get('id')
+ host_luns = host.get_luns()
+ # print('volume id: {0}'.format(volume_id))
+ # print('host luns: {0}'.format(str(host_luns)))
+ for lun in host_luns:
+ if lun.volume == volume:
+ # print('found mapped volume: {0}'.format(volume))
+ return True
+ return False
+
+
+def vol_is_mapped_to_cluster(volume, cluster):
+ volume_fields = volume.get_fields()
+ volume_id = volume_fields.get('id')
+ cluster_luns = cluster.get_luns()
+ # print('volume id: {0}'.format(volume_id))
+ # print('host luns: {0}'.format(str(host_luns)))
+
+ for lun in cluster_luns:
+ if lun.volume == volume:
+ # print('found mapped volume: {0}'.format(volume))
+ return True
+ return False
+
+
+def find_host_lun_use(module, host, volume):
+ check_result = {'lun_used': False, 'lun_volume_matches': False}
+ desired_lun = module.params['lun']
+
+ if desired_lun:
+ for host_lun in host.get_luns():
+ if desired_lun == host_lun.lun:
+ if host_lun.volume == volume:
+ check_result = {'lun_used': True, 'lun_volume_matches': True}
+ else:
+ check_result = {'lun_used': True, 'lun_volume_matches': False}
+
+ return check_result
+
+
+def find_cluster_lun_use(module, cluster, volume):
+ check_result = {'lun_used': False, 'lun_volume_matches': False}
+ desired_lun = module.params['lun']
+
+ if desired_lun:
+ for cluster_lun in cluster.get_luns():
+            if desired_lun == cluster_lun.lun:
+                if cluster_lun.volume == volume:
+ check_result = {'lun_used': True, 'lun_volume_matches': True}
+ else:
+ check_result = {'lun_used': True, 'lun_volume_matches': False}
+
+ return check_result
+
+
+def find_host_lun(host, volume):
+ found_lun = None
+ luns = host.get_luns()
+
+ for lun in luns:
+ if lun.volume == volume:
+ found_lun = lun.lun
+ return found_lun
+
+
+def find_cluster_lun(cluster, volume):
+ found_lun = None
+ luns = cluster.get_luns()
+
+ for lun in luns:
+ if lun.volume == volume:
+ found_lun = lun.lun
+ return found_lun
+
+
+@api_wrapper
+def create_mapping(module, system):
+ """
+ Create mapping of volume to host or cluster. If already mapped, exit_json with changed False.
+ """
+
+ host_name = module.params['host']
+ cluster_name = module.params['cluster']
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
+
+ if host:
+ changed = create_mapping_to_host(module, system)
+ elif cluster:
+ changed = create_mapping_to_cluster(module, system)
+ else:
+ msg = "A programming error has occurred in create_mapping()"
+ module.fail_json(msg=msg)
+
+ # if changed:
+ # with sh.contrib.sudo:
+ # print("rescanning")
+ # rescan_scsi()
+
+ return changed
+
+
+@api_wrapper
+def create_mapping_to_cluster(module, system):
+ """
+ Create mapping of volume to cluster. If already mapped, exit_json with changed False.
+ """
+ changed = False
+
+ cluster = get_cluster(module, system)
+ volume = get_volume(module, system)
+
+ lun_use = find_cluster_lun_use(module, cluster, volume)
+ if lun_use['lun_used']:
+ msg = "Cannot create mapping of volume '{0}' to cluster '{1}' using lun '{2}'. Lun in use.".format(
+ volume.get_name(),
+ cluster.get_name(),
+ module.params['lun'])
+ module.fail_json(msg=msg)
+
+ try:
+ desired_lun = module.params['lun']
+ if not module.check_mode:
+ cluster.map_volume(volume, lun=desired_lun)
+ changed = True
+ except APICommandFailed as err:
+ if "is already mapped" not in str(err):
+            module.fail_json(msg='Cannot map volume {0} to cluster {1}: {2}'.format(
+ module.params['volume'],
+ module.params['cluster'],
+ str(err)))
+
+ return changed
+
+
+@api_wrapper
+def create_mapping_to_host(module, system):
+ """
+ Create mapping of volume to host. If already mapped, exit_json with changed False.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params['host'])
+ volume = get_volume(module, system)
+
+ lun_use = find_host_lun_use(module, host, volume)
+ if lun_use['lun_used']:
+ msg = "Cannot create mapping of volume '{0}' to host '{1}' using lun '{2}'. Lun in use.".format(
+ volume.get_name(),
+ host.get_name(),
+ module.params['lun'])
+ module.fail_json(msg=msg)
+
+ try:
+ desired_lun = module.params['lun']
+ if not module.check_mode:
+ host.map_volume(volume, lun=desired_lun)
+ changed = True
+ except APICommandFailed as err:
+ if "is already mapped" not in str(err):
+            module.fail_json(msg='Cannot map volume {0} to host {1}: {2}'.format(
+ module.params['volume'],
+ module.params['host'],
+ str(err)))
+
+ return changed
+
+
+@api_wrapper
+def update_mapping_to_host(module, system):
+ host = get_host(module, system)
+ volume = get_volume(module, system)
+ desired_lun = module.params['lun']
+
+ if not vol_is_mapped_to_host(volume, host):
+ msg = "Volume {0} is not mapped to host {1}".format(
+ volume.get_name(),
+ host.get_name(),
+ )
+ module.fail_json(msg=msg)
+
+ if desired_lun:
+ found_lun = find_host_lun(host, volume)
+ if found_lun != desired_lun:
+ msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to host '{3}'".format(
+ found_lun,
+ desired_lun,
+ volume.get_name(),
+ host.get_name())
+ module.fail_json(msg=msg)
+
+ changed = False
+ return changed
+
+
+@api_wrapper
+def update_mapping_to_cluster(module, system):
+ cluster = get_cluster(module, system)
+ volume = get_volume(module, system)
+ desired_lun = module.params['lun']
+
+ if not vol_is_mapped_to_cluster(volume, cluster):
+ msg = "Volume {0} is not mapped to cluster {1}".format(
+ volume.get_name(),
+ cluster.get_name(),
+ )
+ module.fail_json(msg=msg)
+
+ if desired_lun:
+ found_lun = find_cluster_lun(cluster, volume)
+ if found_lun != desired_lun:
+ msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to cluster '{3}'".format(
+ found_lun,
+ desired_lun,
+ volume.get_name(),
+ cluster.get_name())
+ module.fail_json(msg=msg)
+
+ changed = False
+ return changed
+
+
+@api_wrapper
+def delete_mapping(module, system):
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
+ if host:
+ changed = delete_mapping_to_host(module, system)
+ elif cluster:
+ changed = delete_mapping_to_cluster(module, system)
+ else:
+ msg = "A programming error has occurred in delete_mapping()"
+ module.fail_json(msg=msg)
+
+ # if changed:
+ # with sh.contrib.sudo:
+ # print("rescanning --remove")
+ # rescan_scsi_remove()
+
+ return changed
+
+
+@api_wrapper
+def delete_mapping_to_host(module, system):
+ """
+    Remove mapping of volume from host. If either the volume or host
+    does not exist, then there is no mapping to unmap. If unmapping
+    generates a key error with 'has no logical units' in its message, then
+    the volume is not mapped. In either case, return changed=False.
+ """
+ changed = False
+ msg = ""
+
+ if not module.check_mode:
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+
+ if volume and host:
+ try:
+ existing_lun = find_host_lun(host, volume)
+ host.unmap_volume(volume)
+ changed = True
+ msg = "Volume '{0}' was unmapped from host '{1}' freeing lun '{2}'".format(
+ module.params['volume'],
+ module.params['host'],
+ existing_lun,
+ )
+
+ except KeyError as err:
+ if 'has no logical units' not in str(err):
+                    module.fail_json(msg='Cannot unmap volume {0} from host {1}: {2}'.format(
+ module.params['volume'],
+ module.params['host'],
+ str(err)))
+ else:
+ msg = "Volume {0} was not mapped to host {1} and so unmapping was not executed".format(
+ module.params['volume'],
+ module.params['host'],
+ )
+ else:
+ msg = "Either volume {0} or host {1} does not exist. Unmapping was not executed".format(
+ module.params['volume'],
+ module.params['host'],
+ )
+ else: # check_mode
+ changed = True
+
+ module.exit_json(msg=msg, changed=changed)
+
+
+@api_wrapper
+def delete_mapping_to_cluster(module, system):
+ """
+    Remove mapping of volume from cluster. If either the volume or cluster
+    does not exist, then there is no mapping to unmap. If unmapping
+    generates a key error with 'has no logical units' in its message, then
+    the volume is not mapped. In either case, return changed=False.
+ """
+ changed = False
+ msg = ""
+
+ if not module.check_mode:
+ volume = get_volume(module, system)
+ cluster = get_cluster(module, system)
+
+ if volume and cluster:
+ try:
+ existing_lun = find_cluster_lun(cluster, volume)
+ cluster.unmap_volume(volume)
+ changed = True
+ msg = "Volume '{0}' was unmapped from cluster '{1}' freeing lun '{2}'".format(
+ module.params['volume'],
+ module.params['cluster'],
+ existing_lun,
+ )
+ except KeyError as err:
+ if 'has no logical units' not in str(err):
+                    module.fail_json(msg='Cannot unmap volume {0} from cluster {1}: {2}'.format(
+ module.params['volume'],
+ module.params['cluster'],
+ str(err)))
+ else:
+ msg = "Volume {0} was not mapped to cluster {1} and so unmapping was not executed".format(
+ module.params['volume'],
+ module.params['cluster'],
+ )
+ else:
+ msg = "Either volume {0} or cluster {1} does not exist. Unmapping was not executed".format(
+ module.params['volume'],
+ module.params['cluster'],
+ )
+ else: # check_mode
+ changed = True
+
+ module.exit_json(msg=msg, changed=changed)
+
+
+def get_sys_vol_host_cluster(module):
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
+ return (system, volume, host, cluster)
+
+
+def get_sys_vol_cluster(module):
+ system = get_system(module)
+ volume = get_volume(module, system)
+ cluster = get_cluster(module, system)
+ return (system, volume, cluster)
+
+
+def get_mapping_fields(volume, host_or_cluster):
+ luns = host_or_cluster.get_luns()
+ for lun in luns:
+ if volume.get_name() == lun.volume.get_name():
+ field_dict = dict(
+ id=lun.id,
+ )
+ return field_dict
+ return dict()
+
+
+def handle_stat(module):
+ system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ volume_name = module.params['volume']
+
+ host_name = module.params['host']
+ if not host_name:
+ host_name = "not specified"
+
+ cluster_name = module.params['cluster']
+ if not cluster_name:
+ cluster_name = "not specified"
+
+ if not volume:
+ module.fail_json(msg='Volume {0} not found'.format(volume_name))
+ if not host and not cluster:
+ module.fail_json(msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ if (not host or not vol_is_mapped_to_host(volume, host)) \
+ and (not cluster or not vol_is_mapped_to_cluster(volume, cluster)):
+ msg = 'Volume {0} is mapped to neither host {1} nor cluster {2}'.format(volume_name, host_name, cluster_name)
+ module.fail_json(msg=msg)
+ if host:
+ found_lun = find_host_lun(host, volume)
+ field_dict = get_mapping_fields(volume, host)
+ if found_lun is not None:
+            msg = 'Volume {0} is mapped to host {1} using lun {2}'.format(volume_name, host_name, found_lun)
+ result = dict(
+ changed=False,
+ volume_lun=found_lun,
+ msg=msg,
+ )
+ else:
+ msg = 'Volume {0} is not mapped to host {1}'.format(volume_name, host_name)
+ module.fail_json(msg=msg)
+ elif cluster:
+ found_lun = find_cluster_lun(cluster, volume)
+ field_dict = get_mapping_fields(volume, cluster)
+ if found_lun is not None:
+ msg = 'Volume {0} is mapped to cluster {1} using lun {2}'.format(volume_name, cluster_name, found_lun)
+ result = dict(
+ changed=False,
+ volume_lun=found_lun,
+ msg=msg,
+ )
+ else:
+ msg = 'Volume {0} is not mapped to cluster {1}'.format(volume_name, cluster_name)
+ module.fail_json(msg=msg)
+ else:
+ msg = 'A programming error has occurred in handle_stat()'
+ module.fail_json(msg=msg)
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ cluster_name = module.params['cluster']
+ if not volume:
+ module.fail_json(changed=False, msg='Volume {0} not found'.format(volume_name))
+ if not host and not cluster:
+ if not host_name:
+ host_name = "not specified"
+ if not cluster_name:
+ cluster_name = "not specified"
+ module.fail_json(changed=False, msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ if host:
+ if not vol_is_mapped_to_host(volume, host):
+ changed = create_mapping(module, system)
+ # TODO: Why is find_host_lun() returning None after creating the mapping?
+ # host.get_luns() returns an empty list, why?
+ # existing_lun = find_host_lun(host, volume)
+ # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
+ # volume.get_name(),
+ # host.get_name(),
+ # existing_lun,
+ # )
+ msg = "Volume '{0}' map to host '{1}' created".format(volume_name, host_name)
+ else:
+ changed = update_mapping_to_host(module, system)
+ existing_lun = find_host_lun(host, volume)
+ msg = "Volume '{0}' map to host '{1}' already exists using lun '{2}'".format(volume_name, host_name, existing_lun)
+ elif cluster:
+ if not vol_is_mapped_to_cluster(volume, cluster):
+ changed = create_mapping(module, system)
+ # TODO: Why is find_host_lun() returning None after creating the mapping?
+ # host.get_luns() returns an empty list, why?
+ # existing_lun = find_host_lun(host, volume)
+ # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
+ # volume.get_name(),
+ # host.get_name(),
+ # existing_lun,
+ # )
+ msg = "Volume '{0}' map to cluster '{1}' created".format(volume_name, cluster_name)
+ else:
+ changed = update_mapping_to_cluster(module, system)
+ existing_lun = find_cluster_lun(cluster, volume)
+ msg = "Volume '{0}' map to cluster '{1}' already exists using lun '{2}'".format(volume_name, cluster_name, existing_lun)
+
+ result = dict(
+ changed=changed,
+ msg=msg,
+ )
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ cluster_name = module.params['cluster']
+ if not volume or (not host and not cluster):
+ module.exit_json(changed=False, msg='Mapping of volume {0} to host {1} or cluster {2} already absent'.format(volume_name, host_name, cluster_name))
+ else:
+ changed = delete_mapping(module, system)
+ module.exit_json(changed=changed, msg="Mapping removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_parameters(module):
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ cluster_name = module.params['cluster']
+ if host_name and cluster_name:
+ msg = "infini_map requires a host or a cluster but not both to be provided"
+ module.fail_json(msg=msg)
+
+ if not host_name and not cluster_name:
+ msg = "infini_map requires a host or a cluster to be provided"
+ module.fail_json(msg=msg)
+
+
+def main():
+ """
+    Gather arguments and manage mapping of volumes to hosts or clusters.
+ """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ host=dict(required=False, default=""),
+ cluster=dict(required=False, default=""),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ volume=dict(required=True),
+            lun=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_parameters(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
new file mode 100644
index 00000000..013d86e5
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_network_space
+version_added: '2.12.0'
+short_description: Create, Delete and Modify network spaces on Infinibox
+description:
+ - This module creates, deletes or modifies network spaces on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+  name:
+    description:
+      - Network space name
+    required: true
+    type: str
+  state:
+    description:
+      - Creates/Modifies network spaces when present. Removes when absent. Shows status when stat.
+    required: false
+    default: present
+    choices: [ "stat", "present", "absent" ]
+    type: str
+ interfaces:
+ description:
+ - A list of interfaces for the space.
+ required: false
+ type: list
+ elements: str
+ service:
+ description:
+ - Choose a service.
+ required: false
+ default: "replication"
+ choices: ["replication", "NAS", "iSCSI"]
+ mtu:
+ description:
+ - Set an MTU. If not specified, defaults to 1500 bytes.
+ required: false
+ type: int
+ network:
+ description:
+ - Starting IP address.
+ required: false
+ type: str
+  netmask:
+    description:
+      - Network mask.
+    required: false
+    type: int
+  default_gateway:
+    description:
+      - Default gateway for the network space. Used together with network and
+        netmask in the space's network configuration (see the example below).
+    required: false
+    type: str
+ ips:
+ description:
+ - List of IPs.
+ required: false
+ default: []
+ type: list
+ elements: str
+ rate_limit:
+ description:
+ - Specify the throughput limit per node.
+ - The limit is specified in Mbps, megabits per second (not megabytes).
+ - Note the limit affects NFS, iSCSI and async-replication traffic.
+ - It does not affect sync-replication or active-active traffic.
+ required: false
+ type: int
+
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new network space
+ infini_network_space:
+ name: iSCSI
+ state: present
+ interfaces:
+ - 1680
+ - 1679
+ - 1678
+ service: ISCSI_SERVICE
+ netmask: 19
+ network: 172.31.32.0
+ default_gateway: 172.31.63.254
+ ips:
+ - 172.31.32.145
+ - 172.31.32.146
+ - 172.31.32.147
+ - 172.31.32.148
+ - 172.31.32.149
+ - 172.31.32.150
+ user: admin
+ password: secret
+ system: ibox001
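+
+# The examples below are illustrative sketches; all values are assumptions.
+- name: Stat network space named iSCSI
+  infini_network_space:
+    name: iSCSI
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001
+
+- name: Remove network space named iSCSI
+  infini_network_space:
+    name: iSCSI
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001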
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ unixMillisecondsToDate,
+ merge_two_dicts,
+ get_net_space,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+ from infinisdk.core.exceptions import ObjectNotFound
+ from infi.dtypes.iqn import make_iscsi_name
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def create_empty_network_space(module, system):
+ # Create network space
+ network_space_name = module.params["name"]
+ service = module.params["service"]
+ rate_limit = module.params["rate_limit"]
+ mtu = module.params["mtu"]
+ network_config = {
+ "netmask": module.params["netmask"],
+ "network": module.params["network"],
+ "default_gateway": module.params["default_gateway"],
+ }
+ interfaces = module.params["interfaces"]
+
+ # print("Creating network space {0}".format(network_space_name))
+ product_id = system.api.get('system/product_id')
+ # print("api: {0}".format(product_id.get_result()))
+
+ net_create_url = "network/spaces"
+ net_create_data = {
+ "name": network_space_name,
+ "service": service,
+ "network_config": network_config,
+ "interfaces": interfaces,
+ }
+ if rate_limit:
+ net_create_data["rate_limit"] = rate_limit
+ if mtu:
+ net_create_data["mtu"] = mtu
+
+ net_create = system.api.post(
+ path=net_create_url,
+ data=net_create_data
+ )
+ # print("net_create: {0}".format(net_create))
+
+
+@api_wrapper
+def find_network_space_id(module, system):
+ """
+ Find the ID of this network space
+ """
+ network_space_name = module.params["name"]
+ net_id_url = "network/spaces?name={0}&fields=id".format(network_space_name)
+ net_id = system.api.get(
+ path=net_id_url
+ )
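+    # The response is assumed to be JSON shaped like
+    # {"result": [{"id": <space_id>}], ...}; the first match is used below.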
+ result = net_id.get_json()['result'][0]
+ space_id = result['id']
+ # print("Network space has ID {0}".format(space_id))
+ return space_id
+
+
+@api_wrapper
+def add_ips_to_network_space(module, system, space_id):
+ network_space_name = module.params["name"]
+ # print("Adding IPs to network space {0}".format(network_space_name))
+
+ ips = module.params["ips"]
+ for ip in ips:
+ ip_url = "network/spaces/{0}/ips".format(space_id)
+ ip_data = ip
+ ip_add = system.api.post(
+ path=ip_url,
+ data=ip_data
+ )
+ # print("add_ips json: {0}".format(ip_add.get_json()))
+ result = ip_add.get_json()['result']
+ # print("add ip result: {0}".format(result))
+
+
+@api_wrapper
+def create_network_space(module, system):
+ if not module.check_mode:
+ # Create space
+ create_empty_network_space(module, system)
+ # Find space's ID
+ space_id = find_network_space_id(module, system)
+ # Add IPs to space
+ add_ips_to_network_space(module, system, space_id)
+
+ changed = True
+ else:
+ changed = False
+
+ return changed
+
+
+def update_network_space(module, system):
+ """
+ Update network space.
+ TODO - This is incomplete and will not update the space.
+ It will instead return changed=False and a message.
+ To implement this we will need to find the existing space.
+ For each field that we support updating, we need to compare existing
+ to new values and if different update. We will need to iterate
+ over the settings or we will receive:
+ Status: 400
+ Code: NOT_SUPPORTED_MULTIPLE_UPDATE
+ """
+ changed = False
+ msg = "Update is not supported yet"
+ module.exit_json(changed=changed, msg=msg)
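+    # A minimal sketch of the per-field update described above, assuming the
+    # API accepts single-field PUTs at network/spaces/<id> (untested):
+    #     space_id = find_network_space_id(module, system)
+    #     for field in ("mtu", "rate_limit"):
+    #         value = module.params[field]
+    #         if value is not None:
+    #             system.api.put(path="network/spaces/{0}".format(space_id),
+    #                            data={field: value})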
+
+ # TODO Everything below is incomplete
+ # Update network space
+ network_space_name = module.params["name"]
+ service = module.params["service"]
+ network_config = {
+ "netmask": module.params["netmask"],
+ "network": module.params["network"],
+ "default_gateway": module.params["default_gateway"],
+ }
+ interfaces = module.params["interfaces"]
+
+ # print("Updating network space {0}".format(network_space_name))
+
+ # Find space's ID
+ space_id = find_network_space_id(module, system)
+
+ net_url = "network/spaces/{0}".format(space_id)
+ net_data = {
+ "name": network_space_name,
+ "service": service,
+ "network_config": network_config,
+ "interfaces": interfaces,
+ }
+
+ # Find existing space
+ net_existing = system.api.get(path=net_url)
+
+ net_update = system.api.put(
+ path=net_url,
+ data=net_data
+ )
+ # print("net_update: {0}".format(net_update))
+
+
+def get_network_space_fields(module, network_space):
+ fields = network_space.get_fields(from_cache=True, raw_value=True)
+
+ field_dict = dict(
+ name=fields["name"],
+ network_space_id=fields["id"],
+ netmask=fields["network_config"]["netmask"],
+ network=fields["network_config"]["network"],
+ default_gateway=fields["network_config"]["default_gateway"],
+ interface_ids=fields["interfaces"],
+ service=fields["service"],
+ ips=fields["ips"],
+ properties=fields["properties"],
+ automatic_ip_failback=fields["automatic_ip_failback"],
+ mtu=fields["mtu"],
+ rate_limit=fields["rate_limit"],
+ )
+ return field_dict
+
+
+def handle_stat(module):
+ network_space_name = module.params["name"]
+ system = get_system(module)
+ net_space = get_net_space(module, system)
+
+ if not net_space:
+ module.fail_json(msg="Network space {0} not found".format(network_space_name))
+
+ field_dict = get_network_space_fields(module, net_space)
+ result = dict(
+ changed=False, msg="Network space {0} stat found".format(network_space_name)
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """
+    If it does not already exist, create the network space. Otherwise, update it.
+ """
+ network_space_name = module.params["name"]
+ system = get_system(module)
+ net_space = get_net_space(module, system)
+ if net_space:
+ changed = update_network_space(module, net_space)
+ msg = "Host {0} updated".format(network_space_name)
+ else:
+ changed = create_network_space(module, system)
+ msg = "Network space {0} created".format(network_space_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ """
+    Remove a network space. First, this may disable and remove the network space's IPs.
+ """
+ network_space_name = module.params["name"]
+ system = get_system(module)
+ network_space = get_net_space(module, system)
+ if not network_space:
+ changed = False
+ msg = "Network space {0} already absent".format(network_space_name)
+ else:
+ # Find IPs from space
+ ips = list(network_space.get_ips())
+
+ # Disable and delete IPs from space
+ if not module.check_mode:
+ for ip in ips:
+ addr = ip["ip_address"]
+
+ # print("Disabling IP {0}".format(addr))
+ try:
+ network_space.disable_ip_address(addr)
+ except APICommandFailed as err:
+ if err.error_code == "IP_ADDRESS_ALREADY_DISABLED":
+ pass
+ # print("Already disabled IP {0}".format(addr))
+ else:
+ # print("Failed to disable IP {0}".format(addr))
+ module.fail_json(
+ msg="Disabling of network space {0} IP {1} failed".format(
+ network_space_name, addr
+ )
+ )
+
+ # print("Removing IP {0}".format(addr))
+ try:
+ network_space.remove_ip_address(addr)
+ except Exception as err:
+ module.fail_json(
+ msg="Removal of network space {0} IP {1} failed: {2}".format(
+ network_space_name, addr, err
+ )
+ )
+
+ # Delete space
+ network_space.delete()
+ changed = True
+ msg = "Network space {0} removed".format(network_space_name)
+ else:
+ changed = False
+ msg = "Network space {0} not altered due to checkmode".format(
+ network_space_name
+ )
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(
+ msg="Internal handler error. Invalid state: {0}".format(state)
+ )
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(
+ default="present", required=False, choices=["stat", "present", "absent"]
+ ),
+ service=dict(
+ default="replication",
+ required=False,
+ choices=["replication", "NAS_SERVICE", "ISCSI_SERVICE"],
+ ),
+            mtu=dict(default=None, required=False, type="int"),
+            network=dict(default=None, required=False),
+            netmask=dict(default=None, required=False, type="int"),
+            default_gateway=dict(default=None, required=False),
+            interfaces=dict(default=list(), required=False, type="list", elements="int"),
+            network_config=dict(default=dict(), required=False, type="dict"),
+            ips=dict(default=list(), required=False, type="list", elements="str"),
+            rate_limit=dict(default=None, required=False, type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
new file mode 100644
index 00000000..d02657a1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_pool
+version_added: '2.3.0'
+short_description: Create, Delete and Modify Pools on Infinibox
+description:
+  - This module creates, deletes or modifies pools on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Pool Name
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies Pool when present or removes when absent
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ type: str
+ size:
+ description:
+ - Pool Physical Capacity in MB, GB or TB units.
+ If pool size is not set on pool creation, size will be equal to 1TB.
+ See examples.
+ required: false
+ type: str
+ vsize:
+ description:
+ - Pool Virtual Capacity in MB, GB or TB units.
+ If pool vsize is not set on pool creation, Virtual Capacity will be equal to Physical Capacity.
+ See examples.
+ required: false
+ type: str
+ ssd_cache:
+ description:
+ - Enable/Disable SSD Cache on Pool
+ required: false
+ default: yes
+ type: bool
+ compression:
+ description:
+ - Enable/Disable Compression on Pool
+ required: false
+ default: yes
+ type: bool
+
+notes:
+ - Infinibox Admin level access is required for pool modifications
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Make sure pool foo exists. Set pool physical capacity to 10TB
+ infini_pool:
+ name: foo
+ size: 10TB
+ vsize: 10TB
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Disable SSD Cache on pool
+ infini_pool:
+ name: foo
+ ssd_cache: no
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Disable Compression on pool
+ infini_pool:
+ name: foo
+ compression: no
+ user: admin
+ password: secret
+ system: ibox001
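+
+# An illustrative stat example; values are assumptions.
+- name: Stat pool foo
+  infini_pool:
+    name: foo
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001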
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_pool,
+ get_system,
+)
+
+
+HAS_CAPACITY = True
+try:
+ from capacity import KiB, Capacity
+except ImportError:
+ HAS_CAPACITY = False
+
+
+@api_wrapper
+def create_pool(module, system):
+ """Create Pool"""
+ name = module.params['name']
+ size = module.params['size']
+ vsize = module.params['vsize']
+ ssd_cache = module.params['ssd_cache']
+ compression = module.params['compression']
+
+ if not module.check_mode:
+ if not size and not vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity('1TB'))
+ elif size and not vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(size))
+ elif not size and vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity(vsize))
+ else:
+ pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(vsize))
+ # Default value of ssd_cache is True. Disable ssd caching if False
+ if not ssd_cache:
+ pool.update_ssd_enabled(ssd_cache)
+ # Default value of compression is True. Disable compression if False
+ if not compression:
+ pool.update_compression_enabled(compression)
+
+ module.exit_json(changed=True, msg='Pool created')
+
+
+@api_wrapper
+def update_pool(module, system, pool):
+ """Update Pool"""
+ changed = False
+
+ size = module.params['size']
+ vsize = module.params['vsize']
+ # ssd_cache = module.params['ssd_cache']
+ compression = module.params['compression']
+
+ # Roundup the capacity to mimic Infinibox behaviour
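+    # For example, a requested size is rounded up to the next multiple of
+    # 6 * 64 KiB = 384 KiB, which is assumed to match the array's allocation unit.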
+ if size:
+ physical_capacity = Capacity(size).roundup(6 * 64 * KiB)
+ if pool.get_physical_capacity() != physical_capacity:
+ if not module.check_mode:
+ pool.update_physical_capacity(physical_capacity)
+ changed = True
+
+ if vsize:
+ virtual_capacity = Capacity(vsize).roundup(6 * 64 * KiB)
+ if pool.get_virtual_capacity() != virtual_capacity:
+ if not module.check_mode:
+ pool.update_virtual_capacity(virtual_capacity)
+ changed = True
+
+ # if pool.is_ssd_enabled() != ssd_cache:
+ # if not module.check_mode:
+ # pool.update_ssd_enabled(ssd_cache)
+ # changed = True
+
+ if pool.is_compression_enabled() != compression:
+ if not module.check_mode:
+ pool.update_compression_enabled(compression)
+ changed = True
+
+ if changed:
+ msg = 'Pool updated'
+ else:
+ msg = 'Pool did not require updating'
+ module.exit_json(changed=changed, msg=msg)
+
+
+@api_wrapper
+def delete_pool(module, pool):
+ """Delete Pool"""
+ if not module.check_mode:
+ pool.delete()
+ msg = 'Pool deleted'
+ module.exit_json(changed=True, msg=msg)
+
+
+def get_sys_pool(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ return (system, pool)
+
+
+def handle_stat(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['name']))
+ fields = pool.get_fields()
+ # print('fields: {0}'.format(fields))
+ free_physical_capacity = fields.get('free_physical_capacity', None)
+ pool_id = fields.get('id', None)
+
+ result = dict(
+ changed=False,
+ free_physical_capacity=str(free_physical_capacity),
+ id=pool_id,
+ msg='Pool stat found'
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ create_pool(module, system)
+ module.exit_json(changed=True, msg="Pool created")
+ else:
+ changed = update_pool(module, system, pool)
+ module.exit_json(changed=changed, msg="Pool updated")
+
+
+def handle_absent(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ module.exit_json(changed=False, msg="Pool already absent")
+ else:
+ delete_pool(module, pool)
+ module.exit_json(changed=True, msg="Pool removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ size=dict(),
+ vsize=dict(),
+ ssd_cache=dict(type='bool', default=True),
+ compression=dict(type='bool', default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib('capacity'))
+
+ if module.params['size']:
+ try:
+ Capacity(module.params['size'])
+ except Exception:
+ module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+ if module.params['vsize']:
+ try:
+ Capacity(module.params['vsize'])
+ except Exception:
+ module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
new file mode 100644
index 00000000..30312726
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_port
+version_added: '2.9.0'
+short_description: Add and Delete fiber channel and iSCSI ports to a host on Infinibox
+description:
+ - This module adds or deletes fiber channel or iSCSI ports to hosts on
+ Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ host:
+ description:
+ - Host Name
+ required: true
+ state:
+ description:
+ - Creates mapping when present, removes when absent, or provides
+ details of a mapping when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ type: str
+ wwns:
+ description:
+ - List of wwns of the host
+ required: false
+ default: []
+ type: list
+ elements: str
+ iqns:
+ description:
+ - List of iqns of the host
+ required: false
+ default: []
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Make sure host bar is available with wwn/iqn ports
+  infini_port:
+    host: bar.example.com
+ state: present
+ wwns:
+ - "00:00:00:00:00:00:00"
+ - "11:11:11:11:11:11:11"
+ iqns:
+ - "iqn.yyyy-mm.reverse-domain:unique-string"
+ system: ibox01
+ user: admin
+ password: secret
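+
+# An illustrative removal example; values are assumptions.
+- name: Remove wwn/iqn ports from host bar
+  infini_port:
+    host: bar.example.com
+    state: absent
+    wwns:
+      - "00:00:00:00:00:00:00"
+    iqns:
+      - "iqn.yyyy-mm.reverse-domain:unique-string"
+    system: ibox01
+    user: admin
+    password: secret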
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_host,
+ merge_two_dicts,
+)
+
+try:
+ from infi.dtypes.wwn import WWN
+ from infi.dtypes.iqn import make_iscsi_name
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def update_ports(module, system):
+ """
+    Add the given WWN and IQN ports to the host. Return True if any port was added.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params["host"])
+
+ for wwn_port in module.params["wwns"]:
+ wwn = WWN(wwn_port)
+ if not system.hosts.get_host_by_initiator_address(wwn) == host:
+ if not module.check_mode:
+ host.add_port(wwn)
+ changed = True
+
+ for iscsi_port in module.params["iqns"]:
+ iscsi_name = make_iscsi_name(iscsi_port)
+ if not system.hosts.get_host_by_initiator_address(iscsi_name) == host:
+ if not module.check_mode:
+ host.add_port(iscsi_name)
+ changed = True
+
+ return changed
+
+
+@api_wrapper
+def delete_ports(module, system):
+ """
+ Remove ports from host.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params["host"])
+ for wwn_port in module.params["wwns"]:
+ wwn = WWN(wwn_port)
+ if system.hosts.get_host_by_initiator_address(wwn) == host:
+ if not module.check_mode:
+ host.remove_port(wwn)
+ changed = True
+ for iscsi_port in module.params["iqns"]:
+ iscsi_name = make_iscsi_name(iscsi_port)
+ if system.hosts.get_host_by_initiator_address(iscsi_name) == host:
+ if not module.check_mode:
+ host.remove_port(iscsi_name)
+ changed = True
+ return changed
+
+
+def get_sys_host(module):
+ system = get_system(module)
+ host = get_host(module, system)
+ return (system, host)
+
+
+def edit_initiator_keys(host_initiators, include_key_list):
+ """
+ For each host initiator, remove keys not in the include_key_list.
+ For FCs, add a long address. This is the address with colons inserted.
+ Return the edited host initiators list.
+ """
+ trimmed_initiators = []
+ for init in host_initiators:
+ if init["type"] == "FC" and "address" in init.keys():
+ # Add address_long key to init whose value is the address with colons inserted.
+ address_str = str(init["address"])
+ address_iter = iter(address_str)
+ long_address = ":".join(a + b for a, b in zip(address_iter, address_iter))
+ init["address_long"] = long_address
+
+ trimmed_item = {
+ key: val for key, val in init.items() if key in include_key_list
+ }
+ trimmed_initiators.append(trimmed_item)
+ return trimmed_initiators
+
+
+def find_host_initiators_data(module, system, host, initiator_type):
+ """
+ Given a host object, find its initiators that match initiator_type.
+ Only include desired initiator keys for each initiator.
+ Return the filtered and edited host initiator list.
+ """
+ request = "initiators?page=1&page_size=1000&host_id={0}".format(host.id)
+ # print("\nrequest:", request, "initiator_type:", initiator_type)
+ get_initiators_result = system.api.get(request, check_version=False)
+ result_code = get_initiators_result.status_code
+ if result_code != 200:
+ msg = "get initiators REST call failed. code: {0}".format(result_code)
+ module.fail_json(msg=msg)
+
+ # Only return initiators of the desired type.
+ host_initiators_by_type = [
+ initiator
+ for initiator in get_initiators_result.get_result()
+ if initiator["type"] == initiator_type
+ ]
+
+ # print("host_initiators_by_type:", host_initiators_by_type)
+ # print()
+
+ # Only include certain keys in the returned initiators
+ if initiator_type == "FC":
+ include_key_list = [
+ "address",
+ "address_long",
+ "host_id",
+ "port_key",
+ "targets",
+ "type",
+ ]
+ elif initiator_type == "ISCSI":
+ include_key_list = ["address", "host_id", "port_key", "targets", "type"]
+ else:
+ msg = "Cannot search for host initiator types other than FC and ISCSI"
+ module.fail_json(msg=msg)
+ host_initiators_by_type = edit_initiator_keys(
+ host_initiators_by_type, include_key_list
+ )
+
+ return host_initiators_by_type
+
+
+def get_port_fields(module, system, host):
+ """
+ Return a dict with desired fields from FC and ISCSI ports associated with the host.
+ """
+ host_fc_initiators = find_host_initiators_data(
+ module, system, host, initiator_type="FC"
+ )
+ host_iscsi_initiators = find_host_initiators_data(
+ module, system, host, initiator_type="ISCSI"
+ )
+
+ field_dict = dict(ports=[],)
+
+ connectivity_lut = {0: "DISCONNECTED", 1: "DEGRADED", 2: "DEGRADED", 3: "CONNECTED"}
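+    # The key is the number of unique target node IDs an initiator can see,
+    # presumably because an InfiniBox has three nodes: all three means
+    # CONNECTED, one or two means DEGRADED, none means DISCONNECTED.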
+
+ ports = host.get_ports()
+ for port in ports:
+ if str(type(port)) == "<class 'infi.dtypes.wwn.WWN'>":
+ found_initiator = False
+ for initiator in host_fc_initiators:
+ if initiator["address"] == str(port).replace(":", ""):
+ found_initiator = True
+ # print("initiator targets:", initiator['targets'])
+ unique_initiator_target_ids = {
+ target["node_id"] for target in initiator["targets"]
+ }
+ port_dict = {
+ "address": str(port),
+ "address_long": initiator["address_long"],
+ "connectivity": connectivity_lut[
+ len(unique_initiator_target_ids)
+ ],
+ "targets": initiator["targets"],
+ "type": initiator["type"],
+ }
+
+ if not found_initiator:
+ address_str = str(port)
+ address_iter = iter(address_str)
+ long_address = ":".join(
+ a + b for a, b in zip(address_iter, address_iter)
+ )
+ port_dict = {
+ "address": str(port),
+ "address_long": long_address,
+ "connectivity": connectivity_lut[0],
+ "targets": [],
+ "type": "FC",
+ }
+
+ field_dict["ports"].append(port_dict)
+
+ if str(type(port)) == "<class 'infi.dtypes.iqn.IQN'>":
+ found_initiator = False
+ for initiator in host_iscsi_initiators:
+ if initiator["address"] == str(port):
+ found_initiator = True
+ # print("initiator targets:", initiator['targets'])
+ unique_initiator_target_ids = {
+ target["node_id"] for target in initiator["targets"]
+ }
+ port_dict = {
+ "address": str(port),
+ "connectivity": connectivity_lut[
+ len(unique_initiator_target_ids)
+ ],
+ "targets": initiator["targets"],
+ "type": initiator["type"],
+ }
+
+ if not found_initiator:
+ port_dict = {
+ "address": str(port),
+ "connectivity": connectivity_lut[0],
+ "targets": [],
+ "type": "ISCSI",
+ }
+
+ field_dict["ports"].append(port_dict)
+
+ return field_dict
+
+
+def handle_stat(module):
+ """
+ Handle stat state. Fail if host is None.
+ Return json with status.
+ """
+ system, host = get_sys_host(module)
+
+ host_name = module.params["host"]
+ if not host:
+ module.fail_json(msg="Host {0} not found".format(host_name))
+
+ field_dict = get_port_fields(module, system, host)
+ result = dict(changed=False, msg="Host {0} ports found".format(host_name),)
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """
+ Handle present state. Fail if host is None.
+ """
+ system, host = get_sys_host(module)
+
+ host_name = module.params["host"]
+ if not host:
+ module.fail_json(msg="Host {0} not found".format(host_name))
+
+ changed = update_ports(module, system)
+ if changed:
+ msg = "Mapping created for host {0}".format(host.get_name())
+ else:
+ msg = "No mapping changes were required for host {0}".format(host.get_name())
+
+ result = dict(changed=changed, msg=msg,)
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ """
+ Handle absent state. Fail if host is None.
+ """
+ system, host = get_sys_host(module)
+ if not host:
+ module.exit_json(
+ changed=False, msg="Host {0} not found".format(module.params["host"])
+ )
+
+ changed = delete_ports(module, system)
+ if changed:
+ msg = "Mapping removed from host {0}".format(host.get_name())
+ else:
+ msg = "No mapping changes were required. Mapping already removed from host {0}".format(
+ host.get_name()
+ )
+
+ result = dict(changed=changed, msg=msg,)
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """
+ Handle states. Always logout.
+ """
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(
+ msg="Internal handler error. Invalid state: {0}".format(state)
+ )
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ pass
+
+
+def main():
+ """
+    Gather arguments and manage WWN and IQN ports on a host.
+ """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+            host=dict(required=True, type="str"),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ wwns=dict(type="list", elements="str", default=list()),
+ iqns=dict(type="list", elements="str", default=list()),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
new file mode 100644
index 00000000..01bcd0a5
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_user
+version_added: '2.9.0'
+short_description: Create, Delete and Modify a User on Infinibox
+description:
+ - This module creates, deletes or modifies a user on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ user_name:
+ description:
+ - The new user's Name. Once a user is created, the user_name may not be
+ changed from this module. It may be changed from the UI or from
+ infinishell.
+ required: true
+ type: str
+ user_email:
+ description:
+ - The new user's Email address
+ required: false
+ type: str
+ user_password:
+ description:
+ - The new user's password
+ required: false
+ type: str
+ user_role:
+ description:
+ - The user's role
+ required: false
+ choices: [ "admin", "pool_admin", "read_only" ]
+ type: str
+ user_enabled:
+ description:
+ - Specify whether to enable the user
+ type: bool
+ required: false
+ default: true
+ user_pool:
+ description:
+ - Use with role==pool_admin. Specify the new user's pool.
+ required: false
+ type: str
+ state:
+ description:
+ - Creates/Modifies user when present or removes when absent
+ required: false
+ default: present
+ choices: [ "stat", "reset_password", "present", "absent" ]
+ type: str
+
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new user
+ infini_user:
+ user_name: foo_user
+ user_email: foo@example.com
+ user_password: secret2
+ user_role: pool_admin
+ user_enabled: false
+    user_pool: foo_pool
+    state: present
+    user: admin
+    password: secret1
+    system: ibox001
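+
+# An illustrative password-reset example; values are assumptions.
+- name: Reset password for user foo_user
+  infini_user:
+    user_name: foo_user
+    user_password: secret3
+    state: reset_password
+    user: admin
+    password: secret1
+    system: ibox001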
+'''
+
+# RETURN = r''' # '''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ get_user,
+ get_pool,
+ unixMillisecondsToDate,
+ merge_two_dicts,
+)
+
+try:
+ from infi.dtypes.iqn import make_iscsi_name
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def create_user(module, system):
+ if not module.check_mode:
+ user = system.users.create(name=module.params['user_name'],
+ password=module.params['user_password'],
+ email=module.params['user_email'],
+ enabled=module.params['user_enabled'],
+ )
+ # Set the user's role
+ user.update_role(module.params['user_role'])
+ if module.params['user_pool']:
+ if not module.params['user_role'] == 'pool_admin':
+ raise AssertionError("user_pool set, but role is not 'pool_admin'")
+ # Add the user to the pool's owners
+ pool = system.pools.get(name=module.params['user_pool'])
+ add_user_to_pool_owners(user, pool)
+ changed = True
+ return changed
+
+
+def add_user_to_pool_owners(user, pool):
+ """
+ Find the current list of pool owners and add user using pool.set_owners().
+ set_owners() replaces the current owners with the list of new owners. So,
+ get owners, add user, then set owners. Further, we need to know if the
+ owners changed. Use sets of owners to compare.
+ """
+ # print("add_user_to_pool_owners(): start")
+ changed = False
+ pool_fields = pool.get_fields(from_cache=True, raw_value=True)
+ pool_owners = pool_fields.get('owners', [])
+ # print('pool_owners:', pool_owners, 'pool_owners type:', type(pool_owners))
+ # print('user:', user)
+ # print('pool:', pool)
+ pool_owners_set = set(pool_owners)
+ # print('pool_owners_set:', pool_owners_set)
+ new_pool_owners_set = pool_owners_set.copy()
+ new_pool_owners_set.add(user.id)
+ # print('new_pool_owners_set:', new_pool_owners_set)
+ if pool_owners_set != new_pool_owners_set:
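+        # Note that set_owners() is called with [user] only, so it replaces
+        # any previously configured owners rather than appending to them.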
+ pool.set_owners([user])
+ changed = True
+ # print("changed:", changed)
+ # print("add_user_to_pool_owners(): end")
+ return changed
+
+
+def remove_user_from_pool_owners(user, pool):
+ changed = False
+ pool_fields = pool.get_fields(from_cache=True, raw_value=True)
+ pool_owners = pool_fields.get('owners', [])
+ try:
+ pool_owners.remove(user)
+ pool.set_owners(pool_owners)
+ changed = True
+ except ValueError:
+ pass # User is not a pool owner
+ return changed
+
+
+@api_wrapper
+def update_user(module, system, user):
+ # print("update_user()")
+ if user is None:
+ raise AssertionError("Cannot update user {0}. User not found.".format(module.params["user_name"]))
+
+ changed = False
+ fields = user.get_fields(from_cache=True, raw_value=True)
+ if fields.get('role') != module.params['user_role'].upper():
+ user.update_field('role', module.params['user_role'])
+ changed = True
+ if fields.get('enabled') != module.params['user_enabled']:
+ user.update_field('enabled', module.params['user_enabled'])
+ changed = True
+ if fields.get('email') != module.params['user_email']:
+ user.update_field('email', module.params['user_email'])
+ changed = True
+
+ if module.params['user_pool']:
+ try:
+ pool_name = module.params['user_pool']
+ pool = system.pools.get(name=pool_name)
+ except Exception as err:
+ module.fail_json(msg='Cannot find pool {0}: {1}'.format(pool_name, err))
+ if add_user_to_pool_owners(user, pool):
+ changed = True
+ return changed
+
+
+@api_wrapper
+def reset_user_password(module, system, user):
+ # print("update_user()")
+ if user is None:
+ raise AssertionError("Cannot change user {0} password. User not found.".format(module.params["user_name"]))
+ user.update_password(module.params['user_password'])
+
+
+@api_wrapper
+def delete_user(module, user):
+ if not user:
+ return False
+
+ changed = True
+ if not module.check_mode:
+ # May raise APICommandFailed if mapped, etc.
+ user.delete()
+ return changed
+
+
+def get_sys_user(module):
+ system = get_system(module)
+ user = get_user(module, system)
+ # print("get_sys_user(): user:", user)
+ return (system, user)
+
+
+def get_user_fields(user):
+ pools = user.get_owned_pools()
+ pool_names = [pool.get_field('name') for pool in pools]
+
+ fields = user.get_fields(from_cache=True, raw_value=True)
+ field_dict = dict(
+ id=user.id,
+ enabled=fields.get('enabled', None),
+ role=fields.get('role', None),
+ email=fields.get('email', None),
+ pools=pool_names,
+ )
+ return field_dict
+
+
+def handle_stat(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ module.fail_json(msg='User {0} not found'.format(user_name))
+ field_dict = get_user_fields(user)
+ result = dict(
+ changed=False,
+ msg='User stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = create_user(module, system)
+ msg = 'User {0} created'.format(user_name)
+ else:
+ changed = update_user(module, system, user)
+ if changed:
+ msg = 'User {0} updated'.format(user_name)
+ else:
+ msg = 'User {0} update required no changes'.format(user_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = False
+ msg = "User {0} already absent".format(user_name)
+ else:
+ changed = delete_user(module, user)
+ msg = "User {0} removed".format(user_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_reset_password(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ msg = 'Cannot change password. User {0} not found'.format(user_name)
+ module.fail_json(msg=msg)
+ else:
+ reset_user_password(module, system, user)
+ msg = 'User {0} password changed'.format(user_name)
+ module.exit_json(changed=True, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ elif state == 'reset_password':
+ handle_reset_password(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ state = module.params['state']
+ user_role = module.params['user_role']
+ user_pool = module.params['user_pool']
+ if state == 'present':
+ if user_role == 'pool_admin' and not user_pool:
+ module.fail_json(msg='user_role "pool_admin" requires a user_pool to be provided')
+ if user_role != 'pool_admin' and user_pool:
+ module.fail_json(msg='Only user_role "pool_admin" should have a user_pool provided')
+
+        valid_keys = ['user_email', 'user_password', 'user_role', 'user_enabled']
+        for valid_key in valid_keys:
+            # Check that required options were provided
+            if module.params[valid_key] is None:
+                msg = 'For state "present", options {0} are required'.format(", ".join(valid_keys))
+                module.fail_json(msg=msg)
+    elif state == 'reset_password':
+        if not module.params['user_password']:
+            msg = 'For state "reset_password", user_password is required'
+            module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ user_name=dict(required=True),
+ user_email=dict(required=False),
+ user_password=dict(required=False, no_log=True),
+ user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only']),
+ user_enabled=dict(required=False, type='bool', default=True),
+ user_pool=dict(required=False),
+ state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
new file mode 100644
index 00000000..0c4a579b
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
@@ -0,0 +1,619 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: infini_vol
+version_added: '2.3.0'
+short_description: Create, Delete or Modify volumes on Infinibox
+description:
+ - This module creates, deletes or modifies a volume on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Volume Name
+ required: true
+ parent_volume_name:
+ description:
+ - Specify a volume name. This is the volume parent for creating a snapshot. Required if volume_type is snapshot.
+ required: false
+ pool:
+ description:
+ - Pool that master volume will reside within. Required for creating a master volume, but not a snapshot.
+ required: false
+ size:
+ description:
+ - Volume size in MB, GB or TB units. Required for creating a master volume, but not a snapshot
+ required: false
+ snapshot_lock_expires_at:
+ description:
+ - This will cause a snapshot to be locked at the specified date-time.
+ Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ required: false
+ snapshot_lock_only:
+ description:
+ - This will lock an existing snapshot but will suppress refreshing the snapshot.
+ type: bool
+ required: false
+ default: false
+ state:
+ description:
+ - Creates/Modifies master volume or snapshot when present or removes when absent.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ thin_provision:
+ description:
+ - Whether the master volume should be thin or thick provisioned.
+ type: bool
+ required: false
+ default: true
+ write_protected:
+ description:
+ - Specifies if the volume should be write protected. Default will be True for snapshots, False for regular volumes.
+ required: false
+ default: "Default"
+ choices: ["Default", "True", "False"]
+ volume_type:
+ description:
+ - Specifies the volume type, regular volume or snapshot.
+ required: false
+ default: master
+ choices: [ "master", "snapshot" ]
+ restore_volume_from_snapshot:
+ description:
+ - Specify true to restore a volume (parent_volume_name) from an existing snapshot specified by the name field.
+ - State must be set to present and volume_type must be 'snapshot'.
+ required: false
+ default: false
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Create new volume named foo under pool named bar
+ infini_vol:
+ name: foo
+ # volume_type: master # Default
+ size: 1TB
+ thin_provision: yes
+ pool: bar
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Create snapshot named foo_snap from volume named foo
+ infini_vol:
+ name: foo_snap
+ volume_type: snapshot
+ parent_volume_name: foo
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Stat snapshot, also a volume, named foo_snap
+ infini_vol:
+ name: foo_snap
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Remove snapshot, also a volume, named foo_snap
+ infini_vol:
+ name: foo_snap
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
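+# An illustrative lock example; the expiry value is an assumption and must be
+# no more than 30 days in the future when the task runs.
+- name: Lock existing snapshot foo_snap without refreshing it
+  infini_vol:
+    name: foo_snap
+    volume_type: snapshot
+    parent_volume_name: foo
+    snapshot_lock_only: true
+    snapshot_lock_expires_at: "2025-12-31 23:59:59.000000"
+    state: present
+    user: admin
+    password: secret
+    system: ibox001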
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+import traceback
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ ObjectNotFound,
+ get_pool,
+ get_system,
+ get_volume,
+ get_vol_sn,
+)
+
+
+HAS_CAPACITY = True
+try:
+ from capacity import KiB, Capacity
+except ImportError:
+ HAS_CAPACITY = False
+
+HAS_ARROW = True
+try:
+ import arrow
+except ImportError:
+ HAS_ARROW = False
+
+
+@api_wrapper
+def create_volume(module, system):
+ """Create Volume"""
+ changed = False
+ if not module.check_mode:
+ if module.params["thin_provision"]:
+ prov_type = "THIN"
+ else:
+ prov_type = "THICK"
+ pool = get_pool(module, system)
+ volume = system.volumes.create(
+ name=module.params["name"], provtype=prov_type, pool=pool
+ )
+
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
+ volume.update_size(size)
+ if module.params["write_protected"] is not None:
+ is_write_prot = volume.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ volume.update_field("write_protected", desired_is_write_prot)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def find_vol_id(module, system, vol_name):
+ """
+ Find the ID of this vol
+ """
+ vol_url = "volumes?name={0}&fields=id".format(vol_name)
+ vol = system.api.get(path=vol_url)
+
+ result = vol.get_json()["result"]
+ if len(result) != 1:
+ module.fail_json("Cannot find a volume with name '{0}'".format(vol_name))
+
+ vol_id = result[0]["id"]
+ # print("Volume {} has ID {}".format(vol_name, vol_id))
+ return vol_id
+
+
+@api_wrapper
+def restore_volume_from_snapshot(module, system):
+ """Use snapshot to restore a volume"""
+ changed = False
+ is_restoring = module.params["restore_volume_from_snapshot"]
+ volume_type = module.params["volume_type"]
+ snap_name = module.params["name"]
+ snap_id = find_vol_id(module, system, snap_name)
+ parent_volume_name = module.params["parent_volume_name"]
+ parent_volume_id = find_vol_id(module, system, parent_volume_name)
+
+ # Check params
+ if not is_restoring:
+ raise AssertionError("A programming error occurred. is_restoring is not True")
+ if volume_type != "snapshot":
+ module.fail_json(
+ msg="Cannot restore a parent volume from snapshot unless the volume "
+ "type is 'snapshot'"
+ )
+ if not parent_volume_name:
+ module.fail_json(
+ msg="Cannot restore a parent volume from snapshot unless the parent "
+ "volume name is specified"
+ )
+
+ if not module.check_mode:
+ restore_url = "volumes/{0}/restore?approved=true".format(parent_volume_id)
+ restore_data = {
+ "source_id": snap_id,
+ }
+ restore = system.api.post(path=restore_url, data=restore_data)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_volume(module, volume):
+ """Update Volume"""
+ changed = False
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
+ if volume.get_size() != size:
+ if not module.check_mode:
+ volume.update_size(size)
+ changed = True
+ if module.params["thin_provision"] is not None:
+        provisioning = str(volume.get_provisioning())
+        if provisioning == "THICK" and module.params["thin_provision"]:
+            if not module.check_mode:
+                volume.update_provisioning("THIN")
+            changed = True
+        if provisioning == "THIN" and not module.params["thin_provision"]:
+ if not module.check_mode:
+ volume.update_provisioning("THICK")
+ changed = True
+ if module.params["write_protected"] is not None:
+ is_write_prot = volume.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+        if is_write_prot != desired_is_write_prot:
+            if not module.check_mode:
+                volume.update_field("write_protected", desired_is_write_prot)
+            changed = True
+
+ return changed
+
+
+@api_wrapper
+def delete_volume(module, volume):
+ """ Delete Volume. Volume could be a snapshot."""
+ if not module.check_mode:
+ volume.delete()
+ changed = True
+ return True
+
+
+@api_wrapper
+def create_snapshot(module, system):
+ """Create Snapshot from parent volume"""
+ snapshot_name = module.params["name"]
+ parent_volume_name = module.params["parent_volume_name"]
+ try:
+ parent_volume = system.volumes.get(name=parent_volume_name)
+ except ObjectNotFound as err:
+ msg = "Cannot create snapshot {0}. Parent volume {1} not found".format(
+ snapshot_name, parent_volume_name
+ )
+ module.fail_json(msg=msg)
+ if not parent_volume:
+ msg = "Cannot find new snapshot's parent volume named {0}".format(
+ parent_volume_name
+ )
+ module.fail_json(msg=msg)
+ if not module.check_mode:
+ if module.params["snapshot_lock_only"]:
+ msg = "Snapshot does not exist. Cannot comply with 'snapshot_lock_only: true'."
+ module.fail_json(msg=msg)
+ check_snapshot_lock_options(module)
+ snapshot = parent_volume.create_snapshot(name=snapshot_name)
+
+ if module.params["write_protected"] is not None:
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ manage_snapshot_locks(module, snapshot)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_snapshot(module, snapshot):
+ """
+ Update/refresh snapshot. May also lock it.
+ """
+ refresh_changed = False
+ if not module.params["snapshot_lock_only"]:
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ if not snap_is_locked:
+ if not module.check_mode:
+ snapshot.refresh_snapshot()
+ refresh_changed = True
+ else:
+ msg = "Snapshot is locked and may not be refreshed"
+ module.fail_json(msg=msg)
+
+ check_snapshot_lock_options(module)
+ lock_changed = manage_snapshot_locks(module, snapshot)
+
+ if not module.check_mode:
+ if module.params["write_protected"] is not None:
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ return refresh_changed or lock_changed
+
+
+def get_sys_pool_vol_parname(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ volume = get_volume(module, system)
+ else:
+ volume = get_vol_sn(module, system)
+ parname = module.params["parent_volume_name"]
+ return (system, pool, volume, parname)
+
+
+def check_snapshot_lock_options(module):
+ """
+ Check if specified options are feasible for a snapshot.
+
+ Prevent very long lock times.
+ max_delta_minutes limits locks to 30 days (43200 minutes).
+
+ This functionality is broken out from manage_snapshot_locks() to allow
+ it to be called by create_snapshot() before the snapshot is actually
+ created.
+ """
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+
+ # Check for lock in the past
+ now = arrow.utcnow()
+ if lock_expires_at <= now:
+ msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
+ msg += "of '{0}' from the past".format(snapshot_lock_expires_at)
+ module.fail_json(msg=msg)
+
+ # Check for lock later than max lock, i.e. too far in future.
+ max_delta_minutes = 43200 # 30 days in minutes
+ max_lock_expires_at = now.shift(minutes=max_delta_minutes)
+ if lock_expires_at >= max_lock_expires_at:
+ msg = "snapshot_lock_expires_at exceeds {0} days in the future".format(
+ max_delta_minutes // 24 // 60
+ )
+ module.fail_json(msg=msg)
+
+
+def manage_snapshot_locks(module, snapshot):
+ """
+ Manage the locking of a snapshot. Check for bad lock times.
+ See check_snapshot_lock_options() which has additional checks.
+ """
+ name = module.params["name"]
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ current_lock_expires_at = snapshot.get_lock_expires_at()
+ changed = False
+
+ check_snapshot_lock_options(module)
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+ if snap_is_locked and lock_expires_at < current_lock_expires_at:
+ # Lock earlier than current lock
+ msg = "snapshot_lock_expires_at '{0}' preceeds the current lock time of '{1}'".format(
+ lock_expires_at, current_lock_expires_at
+ )
+ module.fail_json(msg=msg)
+ elif snap_is_locked and lock_expires_at == current_lock_expires_at:
+ # Lock already set to correct time
+ pass
+ else:
+ # Set lock
+ if not module.check_mode:
+ snapshot.update_lock_expires_at(lock_expires_at)
+ changed = True
+ return changed
+
+
+def handle_stat(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ if not volume:
+ msg = "Volume {0} not found. Cannot stat.".format(module.params["name"])
+ module.fail_json(msg=msg)
+ fields = volume.get_fields() # from_cache=True, raw_value=True)
+ created_at = str(fields.get("created_at", None))
+ has_children = fields.get("has_children", None)
+ lock_expires_at = str(volume.get_lock_expires_at())
+ lock_state = volume.get_lock_state()
+ mapped = str(fields.get("mapped", None))
+ name = fields.get("name", None)
+ parent_id = fields.get("parent_id", None)
+ provisioning = fields.get("provisioning", None)
+ serial = str(volume.get_serial())
+ size = str(volume.get_size())
+ updated_at = str(fields.get("updated_at", None))
+ used = str(fields.get("used_size", None))
+ volume_id = fields.get("id", None)
+ volume_type = fields.get("type", None)
+ write_protected = fields.get("write_protected", None)
+ if volume_type == "SNAPSHOT":
+ msg = "Snapshot stat found"
+ else:
+ msg = "Volume stat found"
+
+ result = dict(
+ changed=False,
+ name=name,
+ created_at=created_at,
+ has_children=has_children,
+ lock_expires_at=lock_expires_at,
+ lock_state=lock_state,
+ mapped=mapped,
+ msg=msg,
+ parent_id=parent_id,
+ provisioning=provisioning,
+ serial=serial,
+ size=size,
+ updated_at=updated_at,
+ used=used,
+ volume_id=volume_id,
+ volume_type=volume_type,
+ write_protected=write_protected,
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume_type = module.params["volume_type"]
+ is_restoring = module.params["restore_volume_from_snapshot"]
+ if volume_type == "master":
+ if not volume:
+ changed = create_volume(module, system)
+ module.exit_json(changed=changed, msg="Volume created")
+ else:
+ changed = update_volume(module, volume)
+ module.exit_json(changed=changed, msg="Volume updated")
+ elif volume_type == "snapshot":
+ snapshot = volume
+ if is_restoring:
+ # Restore volume from snapshot
+ changed = restore_volume_from_snapshot(module, system)
+ module.exit_json(changed=changed, msg="Volume restored from snapshot")
+ else:
+ if not snapshot:
+ changed = create_snapshot(module, system)
+ module.exit_json(changed=changed, msg="Snapshot created")
+ else:
+ changed = update_snapshot(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot updated")
+ else:
+ module.fail_json(msg="A programming error has occurred")
+
+
+def handle_absent(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume_type = module.params["volume_type"]
+
+ if volume and volume.get_lock_state() == "LOCKED":
+ msg = "Cannot delete snapshot. Locked."
+ module.fail_json(msg=msg)
+
+ if volume_type == "master":
+ if not volume:
+ module.exit_json(changed=False, msg="Volume already absent")
+ else:
+ changed = delete_volume(module, volume)
+ module.exit_json(changed=changed, msg="Volume removed")
+ elif volume_type == "snapshot":
+ if not volume:
+ module.exit_json(changed=False, msg="Snapshot already absent")
+ else:
+ snapshot = volume
+ changed = delete_volume(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot removed")
+ else:
+ module.fail_json(msg="A programming error has occured")
+
+
+def execute_state(module):
+ # Handle different write_protected defaults depending on volume_type.
+ if module.params["volume_type"] == "snapshot":
+ if module.params["write_protected"] in ["True", "true", "Default"]:
+ module.params["write_protected"] = True
+ else:
+ module.params["write_protected"] = False
+ elif module.params["volume_type"] == "master":
+ if module.params["write_protected"] in ["False", "false", "Default"]:
+ module.params["write_protected"] = False
+ else:
+ module.params["write_protected"] = True
+ else:
+ msg = f"An error has occurred handling volume_type '{module.params['volume_type']}' or write_protected '{module.params['write_protected']}' values"
+ module.fail_json(msg=msg)
+
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(
+ msg="Internal handler error. Invalid state: {0}".format(state)
+ )
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params["state"]
+ size = module.params["size"]
+ pool = module.params["pool"]
+ volume_type = module.params["volume_type"]
+ parent_volume_name = module.params["parent_volume_name"]
+
+ if state == "present":
+ if volume_type == "master":
+ if state == "present":
+ if parent_volume_name:
+ msg = "parent_volume_name should not be specified "
+ msg += "if volume_type is 'volume'. Snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a volume"
+ module.fail_json(msg=msg)
+ elif volume_type == "snapshot":
+ if size or pool:
+ msg = "Neither pool nor size should not be specified "
+ msg += "for volume_type snapshot"
+ module.fail_json(msg=msg)
+ if state == "present":
+ if not parent_volume_name:
+ msg = "For state 'present' and volume_type 'snapshot', "
+ msg += "parent_volume_name is required"
+ module.fail_json(msg=msg)
+ else:
+ msg = "A programming error has occurred"
+ module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=False),
+            parent_volume_name=dict(default=None, required=False, type="str"),
+ pool=dict(required=False),
+ size=dict(),
+ serial=dict(),
+ snapshot_lock_expires_at=dict(),
+ snapshot_lock_only=dict(type="bool", default=False),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ thin_provision=dict(type="bool", default=True),
+ write_protected=dict(
+ default="Default", choices=["Default", "True", "False"]
+ ),
+ volume_type=dict(default="master", choices=["master", "snapshot"]),
+            restore_volume_from_snapshot=dict(default=False, type="bool"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ if not HAS_ARROW:
+ module.fail_json(msg=missing_required_lib("arrow"))
+
+ if module.params["size"]:
+ try:
+ Capacity(module.params["size"])
+ except Exception:
+ module.fail_json(
+ msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units"
+ )
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()