author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit    38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree      356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/infinidat/infinibox/plugins
parent    Adding upstream version 7.7.0+dfsg. (diff)
Adding upstream version 9.4.0+dfsg. (upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/infinidat/infinibox/plugins')
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py               2
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py                   3
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py              228
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py          199
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py              163
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py               238
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py                141
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py                78
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py         44
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py 180
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py                   517
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py                  62
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py                  269
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py             674
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py        272
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py    360
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py  361
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py                  47
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py                  51
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py                  299
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py                 431
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py     534
-rw-r--r--  ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py                  310
23 files changed, 4545 insertions, 918 deletions
diff --git a/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py b/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
index f88a55ea4..cf3fc13b0 100644
--- a/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
+++ b/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py b/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
index 5e140bdd8..68000c02b 100644
--- a/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
+++ b/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
@@ -5,7 +5,7 @@
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
@@ -28,7 +28,6 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from ansible.errors import AnsibleError
import datetime
diff --git a/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py b/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
index 31df73d04..24f3aa9fb 100644
--- a/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
+++ b/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
@@ -1,16 +1,21 @@
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# pylint: disable=use-list-literal,use-dict-literal,line-too-long,wrong-import-position,broad-exception-caught,invalid-name
+
+""" Infinidat utilities """
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
-from ansible.module_utils.six import raise_from
-try:
- import ansible.module_utils.errors
-except (ImportError, ModuleNotFoundError):
- import errors # Used during "make dev-hack-module-[present, stat, absent]"
+# try:
+# import ansible.module_utils.errors
+# except (ImportError, ModuleNotFoundError):
+# import errors # Used during "make dev-hack-module-[present, stat, absent]"
try:
from infinisdk import InfiniBox, core
@@ -22,13 +27,32 @@ else:
HAS_INFINISDK = True
INFINISDK_IMPORT_ERROR = None
+HAS_ARROW = True
+try:
+ import arrow
+except ImportError:
+ HAS_ARROW = False
+except Exception:
+ HAS_ARROW = False
+
from functools import wraps
from os import environ
from os import path
from datetime import datetime
+HAS_URLLIB3 = True
+try:
+ import urllib3
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+except ImportError:
+ HAS_URLLIB3 = False
+
-def unixMillisecondsToDate(unix_ms):
+INFINIBOX_SYSTEM = None
+
+
+def unixMillisecondsToDate(unix_ms): # pylint: disable=invalid-name
+ """ Convert unix time with ms to a datetime UTC time """
return (datetime.utcfromtimestamp(unix_ms / 1000.), 'UTC')
@@ -39,12 +63,13 @@ def api_wrapper(func):
module = args[0]
try:
return func(*args, **kwargs)
- except core.exceptions.APICommandException as e:
- module.fail_json(msg=e.message)
- except core.exceptions.SystemNotFoundException as e:
- module.fail_json(msg=e.message)
- except Exception:
- raise
+ except core.exceptions.SystemNotFoundException as err:
+ module.fail_json(msg=str(err))
+ except core.exceptions.APICommandException as err:
+ module.fail_json(msg=str(err))
+ except Exception as err:
+ module.fail_json(msg=str(err))
+ return None # Should never get to this line but it quiets pylint inconsistent-return-statements
return __wrapper
@@ -74,28 +99,37 @@ def merge_two_dicts(dict1, dict2):
@api_wrapper
def get_system(module):
- """Return System Object or Fail"""
- box = module.params['system']
- user = module.params.get('user', None)
- password = module.params.get('password', None)
-
- if user and password:
- system = InfiniBox(box, auth=(user, password), use_ssl=True)
- elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
- system = InfiniBox(box,
- auth=(environ.get('INFINIBOX_USER'),
- environ.get('INFINIBOX_PASSWORD')),
- use_ssl=True)
- elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
- system = InfiniBox(box, use_ssl=True)
- else:
- module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
+ """
+ Return a System object, creating and logging into one if it does not already exist, or fail.
+ Use a global InfiniBox system object so that only one
+ system session is used per module instance.
+ This lets execute_state() log out of that single session properly.
+ """
+ global INFINIBOX_SYSTEM # pylint: disable=global-statement
+
+ if not INFINIBOX_SYSTEM:
+ # Create system and login
+ box = module.params['system']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+ if user and password:
+ INFINIBOX_SYSTEM = InfiniBox(box, auth=(user, password), use_ssl=True)
+ elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
+ INFINIBOX_SYSTEM = InfiniBox(box,
+ auth=(environ.get('INFINIBOX_USER'),
+ environ.get('INFINIBOX_PASSWORD')),
+ use_ssl=True)
+ elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
+ INFINIBOX_SYSTEM = InfiniBox(box, use_ssl=True)
+ else:
+ module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
- try:
- system.login()
- except Exception:
- module.fail_json(msg="Infinibox authentication failed. Check your credentials")
- return system
+ try:
+ INFINIBOX_SYSTEM.login()
+ except Exception:
+ module.fail_json(msg="Infinibox authentication failed. Check your credentials")
+
+ return INFINIBOX_SYSTEM
@api_wrapper
@@ -108,7 +142,10 @@ def get_pool(module, system):
try:
name = module.params['pool']
except KeyError:
- name = module.params['name']
+ try:
+ name = module.params['name']
+ except KeyError:
+ name = module.params['object_name'] # For metadata
return system.pools.get(name=name)
except Exception:
return None
@@ -121,7 +158,10 @@ def get_filesystem(module, system):
try:
filesystem = system.filesystems.get(name=module.params['filesystem'])
except KeyError:
- filesystem = system.filesystems.get(name=module.params['name'])
+ try:
+ filesystem = system.filesystems.get(name=module.params['name'])
+ except KeyError:
+ filesystem = system.filesystems.get(name=module.params['object_name'])
return filesystem
except Exception:
return None
@@ -137,7 +177,7 @@ def get_export(module, system):
export_name = module.params['name']
export = system.exports.get(export_path=export_name)
- except ObjectNotFound as err:
+ except ObjectNotFound:
return None
return export
@@ -150,7 +190,10 @@ def get_volume(module, system):
try:
volume = system.volumes.get(name=module.params['name'])
except KeyError:
- volume = system.volumes.get(name=module.params['volume'])
+ try:
+ volume = system.volumes.get(name=module.params['volume'])
+ except KeyError:
+ volume = system.volumes.get(name=module.params['object_name']) # Used by metadata module
return volume
except Exception:
return None
@@ -167,16 +210,23 @@ def get_net_space(module, system):
@api_wrapper
-def get_vol_sn(module, system):
- """Return Volume or None"""
+def get_vol_by_sn(module, system):
+ """Return volume that matches the serial or None"""
try:
- try:
- volume = system.volumes.get(serial=module.params['serial'])
- except KeyError:
- return None
- return volume
+ volume = system.volumes.get(serial=module.params['serial'])
except Exception:
return None
+ return volume
+
+
+@api_wrapper
+def get_fs_by_sn(module, system):
+ """Return filesystem that matches the serial or None"""
+ try:
+ filesystem = system.filesystems.get(serial=module.params['serial'])
+ except Exception:
+ return None
+ return filesystem
@api_wrapper
@@ -189,7 +239,10 @@ def get_host(module, system):
try:
host_param = module.params['name']
except KeyError:
- host_param = module.params['host']
+ try:
+ host_param = module.params['host']
+ except KeyError:
+ host_param = module.params['object_name'] # For metadata
if a_host_name == host_param:
host = a_host
@@ -208,7 +261,10 @@ def get_cluster(module, system):
try:
cluster_param = module.params['name']
except KeyError:
- cluster_param = module.params['cluster']
+ try:
+ cluster_param = module.params['cluster']
+ except KeyError:
+ cluster_param = module.params['object_name'] # For metadata
if a_cluster_name == cluster_param:
cluster = a_cluster
@@ -217,12 +273,86 @@ def get_cluster(module, system):
@api_wrapper
-def get_user(module, system):
+def get_user(module, system, user_name_to_find=None):
"""Find a user by the user_name specified in the module"""
user = None
- user_name = module.params['user_name']
+ if not user_name_to_find:
+ user_name = module.params['user_name']
+ else:
+ user_name = user_name_to_find
try:
user = system.users.get(name=user_name)
except ObjectNotFound:
pass
return user
+
+
+def check_snapshot_lock_options(module):
+ """
+ Check if specified options are feasible for a snapshot.
+
+ Prevent very long lock times.
+ max_delta_minutes limits locks to 30 days (43200 minutes).
+
+ This functionality is broken out from manage_snapshot_locks() to allow
+ it to be called by create_snapshot() before the snapshot is actually
+ created.
+ """
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+
+ # Check for lock in the past
+ now = arrow.utcnow()
+ if lock_expires_at <= now:
+ msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
+ msg += f"of '{snapshot_lock_expires_at}' from the past"
+ module.fail_json(msg=msg)
+
+ # Check for lock later than max lock, i.e. too far in future.
+ max_delta_minutes = 43200 # 30 days in minutes
+ max_lock_expires_at = now.shift(minutes=max_delta_minutes)
+ if lock_expires_at >= max_lock_expires_at:
+ msg = f"snapshot_lock_expires_at exceeds {max_delta_minutes // 24 // 60} days in the future"
+ module.fail_json(msg=msg)
+
+
+def manage_snapshot_locks(module, snapshot):
+ """
+ Manage the locking of a snapshot. Check for bad lock times.
+ See check_snapshot_lock_options() which has additional checks.
+ """
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ current_lock_expires_at = snapshot.get_lock_expires_at()
+ changed = False
+
+ check_snapshot_lock_options(module)
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+ if snap_is_locked and lock_expires_at < current_lock_expires_at:
+ # Lock earlier than current lock
+ msg = f"snapshot_lock_expires_at '{lock_expires_at}' preceeds the current lock time of '{current_lock_expires_at}'"
+ module.fail_json(msg=msg)
+ elif snap_is_locked and lock_expires_at == current_lock_expires_at:
+ # Lock already set to correct time
+ pass
+ else:
+ # Set lock
+ if not module.check_mode:
+ snapshot.update_lock_expires_at(lock_expires_at)
+ changed = True
+ return changed
+
+
+def catch_failed_module_utils_imports(module):
+ """ Fail the module with one message naming each third-party import that failed """
+ msg = ""
+ if not HAS_ARROW:
+ msg += "Failed to import arrow module. "
+ if not HAS_INFINISDK:
+ msg += "Failed to import infinisdk module. "
+ if not HAS_URLLIB3:
+ msg += "Failed to import urllib3 module. "
+ module.fail_json(msg=msg)
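The helpers added above (guarded imports behind HAS_* flags, the global single-session get_system(), and catch_failed_module_utils_imports()) are meant to be wired together from each module's main(). A minimal, hypothetical sketch of that wiring, following the pattern the modules below use; the no-op execute_state() is a stand-in for a real module's stat/present/absent dispatcher:

from ansible.module_utils.basic import AnsibleModule

from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
    HAS_ARROW,
    HAS_INFINISDK,
    HAS_URLLIB3,
    catch_failed_module_utils_imports,
    get_system,
    infinibox_argument_spec,
)


def execute_state(module):
    """ Stand-in dispatcher: a real module branches on module.params['state'] here """
    try:
        get_system(module)  # creates and logs into the single global session
        module.exit_json(changed=False, msg="No-op example handler")
    finally:
        get_system(module).logout()


def main():
    """ Build the shared argument spec, fail early on missing imports, then dispatch """
    argument_spec = infinibox_argument_spec()
    argument_spec.update(dict(name=dict(required=True)))
    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not all((HAS_ARROW, HAS_INFINISDK, HAS_URLLIB3)):
        catch_failed_module_utils_imports(module)

    execute_state(module)


if __name__ == "__main__":
    main()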
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py
new file mode 100644
index 000000000..bb32b48b1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""This module creates or modifies SSL certificates on Infinibox."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_certificate
+version_added: 2.16.0
+short_description: Create (present state) or clear (absent state) SSL certificates on Infinibox
+description:
+ - This module uploads (present state) or clears (absent state) SSL certificates on Infinibox
+author: David Ohlemacher (@ohlemacher)
+options:
+ certificate_file_name:
+ description:
+ - Name with full path of a certificate file.
+ type: str
+ required: false
+ state:
+ description:
+ - Creates/Modifies the system's SSL certificate by uploading one from a file, when using state present.
+ - For state absent, the current certificate is removed and a new self-signed certificate is automatically generated by the IBOX.
+ - State stat shows the existing certificate's details.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Upload SSL certificate from file
+ infini_certificate:
+ certificate_file_name: cert.crt
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Stat SSL certificate
+ infini_certificate:
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Clear SSL certificate
+ infini_certificate:
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_INFINISDK = False
+
+
+def handle_stat(module):
+ """ Handle the stat state parameter """
+ certificate_file_name = module.params['certificate_file_name']
+ path = "system/certificates"
+ system = get_system(module)
+ try:
+ cert_result = system.api.get(path=path).get_result()[0]
+ except APICommandFailed:
+ msg = f"Cannot stat SSL certificate {certificate_file_name}"
+ module.fail_json(msg=msg)
+ result = dict(
+ changed=False,
+ msg="SSL certificate stat {certificate_file_name} found"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """ Handle the present state parameter """
+ certificate_file_name = module.params['certificate_file_name']
+ path = "system/certificates"
+ system = get_system(module)
+
+ try:
+ cert_file = open(certificate_file_name, 'rb')
+ except FileNotFoundError:
+ module.fail_json(msg=f"Cannot find SSL certificate file named {certificate_file_name}")
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f"Cannot open SSL certificate file named {certificate_file_name}: {err}")
+ with cert_file:
+ files = {'file': cert_file}
+ try:
+ cert_result = system.api.post(path=path, files=files).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot upload cert: {err}"
+ module.fail_json(msg=msg)
+
+ cert_serial = cert_result['certificate']['serial_number']
+ cert_issued_by_cn = cert_result['certificate']['issued_by']['CN']
+ cert_issued_to_cn = cert_result['certificate']['issued_to']['CN']
+ result = dict(
+ changed=True,
+ msg="System SSL certificate uploaded successfully. " +
+ f"Certificate S/N {cert_serial} issued by CN {cert_issued_by_cn} to CN {cert_issued_to_cn}"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ """ Handle the absent state parameter. Clear existing cert. IBOX will install self signed cert. """
+ path = "system/certificates/generate_self_signed?approved=true"
+ system = get_system(module)
+ try:
+ cert_result = system.api.post(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot clear SSL certificate: {err}"
+ module.fail_json(msg=msg)
+ result = dict(
+ changed=True,
+ msg="System SSL certificate cleared and a self signed certificate was installed successfully"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ certificate_file_name = module.params["certificate_file_name"]
+ state = module.params["state"]
+
+ if state in ["stat", "absent"]:
+ pass
+ if state in ["present"]:
+ if not certificate_file_name:
+ msg = "Certificate file name parameter must be provided"
+ module.fail_json(msg=msg)
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ certificate_file_name=dict(required=False, default=None),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
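For reference, the REST call that handle_present() wraps can be issued directly with infinisdk as well; a rough standalone sketch, with hostname, credentials and file name as placeholders and error handling trimmed:

from infinisdk import InfiniBox
from infinisdk.core.exceptions import APICommandFailed

system = InfiniBox("ibox001", auth=("admin", "secret"), use_ssl=True)
system.login()
try:
    # Upload a certificate file to the same endpoint infini_certificate uses for state=present
    with open("cert.crt", "rb") as cert_file:
        cert = system.api.post(path="system/certificates", files={"file": cert_file}).get_result()
    print("Uploaded certificate S/N", cert["certificate"]["serial_number"])
except APICommandFailed as err:
    print("Upload failed:", err)
finally:
    system.logout()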
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
index fe682cf3c..c1972efe2 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+""" A module for managing Infinibox clusters """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -51,12 +56,9 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
try:
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
- INFINISDK_IMPORT_ERROR,
api_wrapper,
infinibox_argument_spec,
get_system,
@@ -67,7 +69,6 @@ try:
except ModuleNotFoundError:
from infinibox import ( # Used when hacking
HAS_INFINISDK,
- INFINISDK_IMPORT_ERROR,
api_wrapper,
infinibox_argument_spec,
get_system,
@@ -76,12 +77,6 @@ except ModuleNotFoundError:
merge_two_dicts,
)
-try:
- from infi.dtypes.iqn import make_iscsi_name
- HAS_INFI_MOD = True
-except ImportError:
- HAS_INFI_MOD = False
-
@api_wrapper
def get_host_by_name(system, host_name):
@@ -98,67 +93,58 @@ def get_host_by_name(system, host_name):
@api_wrapper
def create_cluster(module, system):
- # print("create cluster")
- changed = True
+ """ Create a cluster """
+ changed = False
if not module.check_mode:
cluster = system.host_clusters.create(name=module.params['name'])
cluster_hosts = module.params['cluster_hosts']
- for cluster_host in cluster_hosts:
- if cluster_host['host_cluster_state'] == 'present':
- host = get_host_by_name(system, cluster_host['host_name'])
- cluster.add_host(host)
- # print("Added host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
- # else:
- # print("Skipped adding (absent) host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ if cluster_hosts:
+ for cluster_host in cluster_hosts:
+ if cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, cluster_host['host_name'])
+ cluster.add_host(host)
+ changed = True
return changed
@api_wrapper
def update_cluster(module, system, cluster):
- # print("update cluster")
+ """ Update a cluster """
changed = False
# e.g. of one host dict found in the module.params['cluster_hosts'] list:
# {host_name: <'some_name'>, host_cluster_state: <'present' or 'absent'>}
module_cluster_hosts = module.params['cluster_hosts']
current_cluster_hosts_names = [host.get_name() for host in cluster.get_field('hosts')]
- # print("current_cluster_hosts_names:", current_cluster_hosts_names)
- for module_cluster_host in module_cluster_hosts:
- module_cluster_host_name = module_cluster_host['host_name']
- # print("module_cluster_host_name:", module_cluster_host_name)
- # Need to add host to cluster?
- if module_cluster_host_name not in current_cluster_hosts_names:
- if module_cluster_host['host_cluster_state'] == 'present':
- host = get_host_by_name(system, module_cluster_host_name)
- if not host:
- msg = 'Cannot find host {0} to add to cluster {1}'.format(
- module_cluster_host_name,
- cluster.get_name(),
- )
- module.fail_json(msg=msg)
- cluster.add_host(host)
- # print("Added host {0} to cluster {1}".format(host.get_name(), cluster.get_name()))
- changed = True
- # Need to remove host from cluster?
- elif module_cluster_host_name in current_cluster_hosts_names:
- if module_cluster_host['host_cluster_state'] == 'absent':
- host = get_host_by_name(system, module_cluster_host_name)
- if not host:
- msg = 'Cannot find host {0} to add to cluster {1}'.format(
- module_cluster_host_name,
- cluster.get_name(),
- )
- module.fail_json(msg=msg)
- cluster.remove_host(host)
- # print("Removed host {0} from cluster {1}".format(host.get_name(), cluster.get_name()))
- changed = True
+ if module_cluster_hosts:
+ for module_cluster_host in module_cluster_hosts:
+ module_cluster_host_name = module_cluster_host['host_name']
+ # Need to add host to cluster?
+ if module_cluster_host_name not in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = f'Cannot find host {module_cluster_host_name} to add to cluster {cluster.get_name()}'
+ module.fail_json(msg=msg)
+ cluster.add_host(host)
+ changed = True
+ # Need to remove host from cluster?
+ elif module_cluster_host_name in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'absent':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = f'Cannot find host {module_cluster_host_name} to remove from cluster {cluster.get_name()}'
+ module.fail_json(msg=msg)
+ cluster.remove_host(host)
+ changed = True
return changed
@api_wrapper
def delete_cluster(module, cluster):
+ """ Delete a cluster """
if not cluster:
- msg = "Cluster {0} not found".format(cluster.get_name())
+ msg = f"Cluster {cluster.get_name()} not found"
module.fail_json(msg=msg)
changed = True
if not module.check_mode:
@@ -166,13 +152,8 @@ def delete_cluster(module, cluster):
return changed
-def get_sys_cluster(module):
- system = get_system(module)
- cluster = get_cluster(module, system)
- return (system, cluster)
-
-
def get_cluster_fields(cluster):
+ """ Find fields for cluster """
fields = cluster.get_fields(from_cache=True, raw_value=True)
created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
field_dict = dict(
@@ -192,10 +173,12 @@ def get_cluster_fields(cluster):
def handle_stat(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle stat state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
- module.fail_json(msg='Cluster {0} not found'.format(cluster_name))
+ module.fail_json(msg=f'Cluster {cluster_name} not found')
field_dict = get_cluster_fields(cluster)
result = dict(
changed=False,
@@ -206,34 +189,39 @@ def handle_stat(module):
def handle_present(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle present state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
changed = create_cluster(module, system)
- msg = 'Cluster {0} created'.format(cluster_name)
+ msg = f'Cluster {cluster_name} created'
module.exit_json(changed=changed, msg=msg)
else:
changed = update_cluster(module, system, cluster)
if changed:
- msg = 'Cluster {0} updated'.format(cluster_name)
+ msg = f'Cluster {cluster_name} updated'
else:
- msg = 'Cluster {0} required no changes'.format(cluster_name)
+ msg = f'Cluster {cluster_name} required no changes'
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle absent state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
changed = False
- msg = "Cluster {0} already absent".format(cluster_name)
+ msg = f"Cluster {cluster_name} already absent"
else:
changed = delete_cluster(module, cluster)
- msg = "Cluster {0} removed".format(cluster_name)
+ msg = f"Cluster {cluster_name} removed"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Handle states """
state = module.params['state']
try:
if state == 'stat':
@@ -243,36 +231,38 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def check_options(module):
+ """ Check module parameters for logic errors """
state = module.params['state']
if state == 'present':
- if module.params['cluster_hosts'] is None:
- module.fail_json(msg='Option cluster_hosts, a list, must be provided')
-
cluster_hosts = module.params['cluster_hosts']
- for host in cluster_hosts:
- try:
- # Check host has required keys
- valid_keys = ['host_name', 'host_cluster_state']
- for valid_key in valid_keys:
- not_used = host[valid_key]
- # Check host has no unknown keys
- if len(host.keys()) != len(valid_keys):
- raise KeyError
- except KeyError:
- msg = 'With state present, all cluster_hosts ' \
- + 'require host_name and host_cluster_state key:values ' \
- + 'and no others'
- module.fail_json(msg=msg)
+ if cluster_hosts:
+ for host in cluster_hosts:
+ try:
+ # Check host has required keys
+ valid_keys = ['host_name', 'host_cluster_state']
+ for valid_key in valid_keys:
+ # _ = host[valid_key]
+ if valid_key not in host.keys():
+ raise KeyError
+ # Check host has no unknown keys
+ if len(host.keys()) != len(valid_keys):
+ raise KeyError
+ except KeyError:
+ msg = 'With state present, all cluster_hosts ' \
+ + 'require host_name and host_cluster_state key:values ' \
+ + 'and no others'
+ module.fail_json(msg=msg)
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -284,9 +274,6 @@ def main():
module = AnsibleModule(argument_spec, supports_check_mode=True)
- if not HAS_INFI_MOD:
- module.fail_json(msg=missing_required_lib('infi.dtypes.iqn'))
-
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py
new file mode 100644
index 000000000..881480008
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module Modifies config on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_config
+version_added: 2.13.0
+short_description: Modify config on Infinibox
+description:
+ - This module modifies system config on Infinibox.
+author: Wei Wang (@wwang)
+options:
+ config_group:
+ description:
+ - Config group
+ type: str
+ required: true
+ choices: [ "core", "ip_config", "iscsi", "limits", "mgmt", "ndoe_interfaces", "overriders", "security", "ssh" ]
+ key:
+ description:
+ - Name of the config
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the config key
+ type: str
+ required: false
+ state:
+ description:
+ - Queries (stat state) or modifies (present state) the config value.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Set pool compression default setting to false
+ infini_config:
+ config_group: "mgmt"
+ key: "pool.compression_enabled_default"
+ value: false
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def get_config(module, disable_fail=False):
+ """
+ Find and return config setting value
+ Use disable_fail when we are looking for config
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ result = None
+
+ path = f"config/{config_group}/{key}"
+ try:
+ api_response = system.api.get(path=path)
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot {config_group} key {key}: {err}")
+
+ if api_response:
+ result = api_response.get_result()
+ good_status = api_response.response.status_code == 200
+ if not disable_fail and not good_status:
+ msg = f"Configuration for {config_group} with key {key} failed"
+ module.fail_json(msg=msg)
+ elif disable_fail and not good_status:
+ return None
+ return result
+
+
+def handle_stat(module):
+ """Return config stat"""
+
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ value = get_config(module)
+
+ result = {
+ "changed": False,
+ "object_type": config_group,
+ "key": key,
+ "value": value,
+ }
+ module.exit_json(**result)
+
+
+@api_wrapper
+def set_config(module):
+ """
+ Set the value of the given config_group and key.
+ """
+ system = get_system(module)
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ value = module.params["value"]
+
+ path = f"config/{config_group}/{key}"
+
+ if value.lower() == "true":
+ data = True
+ elif value.lower() == "false":
+ data = False
+
+ try:
+ system.api.put(path=path, data=data)
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot set config group {config_group} key {key} to value {value}: {err}")
+ # Variable 'changed' not returned by design
+
+
+def handle_present(module):
+ """Make config present"""
+ changed = False
+ msg = "Config unchanged"
+ if not module.check_mode:
+ old_config = get_config(module, disable_fail=True)
+ set_config(module)
+ new_config = get_config(module)
+ changed = new_config != old_config
+ if changed:
+ msg = "Config changed"
+ else:
+ msg = "Config unchanged since the value is the same as the existing config"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params["state"]
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ value = module.params["value"]
+ vtype = type(value)
+
+ groups = [
+ "core",
+ "ip_config",
+ "iscsi",
+ "limits",
+ "mgmt",
+ "ndoe_interfaces",
+ "overriders",
+ "security",
+ "ssh",
+ ]
+
+ if state == "present" and key == "pool.compression_enabled_default":
+ if not isinstance(value, str):
+ module.fail_json(
+ msg=f"Value must be a string. Invalid value: {value} of type {vtype}."
+ )
+ if config_group not in groups:
+ module.fail_json(
+ msg=f"config_group must be one of {groups}"
+ )
+
+
+def main():
+ """Main module function"""
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "config_group": {"required": True, "choices": ["core", "ip_config", "iscsi", "limits", "mgmt", "ndoe_interfaces", "overriders", "security", "ssh"]},
+ "key": {"required": True, "no_log": False},
+ "value": {"required": False, "default": None},
+ "state": {"required": False, "default": "present", "choices": ["stat", "present"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
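infini_config is a thin wrapper around GET and PUT on config/{config_group}/{key}. A small illustrative sketch of the same round trip with plain infinisdk; the hostname and credentials are placeholders and the key is the one from the module's own example:

from infinisdk import InfiniBox
from infinisdk.core.exceptions import APICommandFailed

system = InfiniBox("ibox001", auth=("admin", "secret"), use_ssl=True)
system.login()
path = "config/mgmt/pool.compression_enabled_default"
try:
    current = system.api.get(path=path).get_result()  # read the current value, as get_config() does
    print("Current value:", current)
    system.api.put(path=path, data=False)             # write a boolean back, as set_config() does for "false"
except APICommandFailed as err:
    print("Config update failed:", err)
finally:
    system.logout()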
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py
new file mode 100644
index 000000000..bc6cbdade
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module sends events to Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_event
+version_added: 2.16.0
+short_description: Create custom events on Infinibox
+description:
+ - This module creates events on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ description_template:
+ description:
+ - The content of the custom event
+ type: str
+ required: true
+ visibility:
+ description:
+ - The event's visibility
+ type: str
+ required: false
+ choices:
+ - CUSTOMER
+ - INFINIDAT
+ default: CUSTOMER
+ level:
+ description:
+ - The level of the custom event
+ type: str
+ required: true
+ choices:
+ - INFO
+ - WARNING
+ - ERROR
+ - CRITICAL
+ state:
+ description:
+ - Creates a custom event when present. Stat is not yet implemented. There is no way to remove events once posted, so absent is also not implemented.
+ type: str
+ required: false
+ default: present
+ choices: [ "present" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create custom info event
+ infini_event:
+ description_template: Message content
+ level: INFO
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ infinibox_argument_spec,
+ get_system,
+)
+
+
+def handle_stat(module):
+ """Handle stat state"""
+ msg = "handle_stat() is not implemented"
+ module.exit_json(msg=msg)
+
+
+def handle_present(module):
+ """Handle present state"""
+ system = get_system(module)
+ description_template = module.params["description_template"]
+ level = module.params["level"]
+ visibility = module.params["visibility"]
+
+ path = "events/custom"
+ json_data = {
+ "description_template": description_template,
+ "level": level,
+ "visibility": visibility,
+ }
+ system.api.post(path=path, data=json_data)
+ module.exit_json(changed=True, msg="Event posted")
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ else:
+ module.exit_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ description_template=dict(required=True),
+ level=dict(required=True, choices=["INFO", "WARNING", "ERROR", "CRITICAL"]),
+ state=dict(required=False, default="present", choices=["present"]),
+ visibility=dict(default="CUSTOMER", required=False, choices=["CUSTOMER", "INFINIDAT"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
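handle_present() above simply POSTs a JSON payload to events/custom. The equivalent direct call with infinisdk looks roughly like this; hostname, credentials and the message text are placeholders:

from infinisdk import InfiniBox

system = InfiniBox("ibox001", auth=("admin", "secret"), use_ssl=True)
system.login()
try:
    # Same payload shape that infini_event builds from its module parameters
    event = {
        "description_template": "Backup job finished",
        "level": "INFO",
        "visibility": "CUSTOMER",
    }
    system.api.post(path="events/custom", data=event)
finally:
    system.logout()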
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
index f83e9b1f1..409c89924 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat(info@infinidat.com)
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module modifies exports on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat(info@infinidat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -82,13 +87,13 @@ EXAMPLES = r'''
client_list:
- client: 192.168.0.2
access: RW
- no_root_squash: True
+ no_root_squash: true
- client: 192.168.0.100
access: RO
- no_root_squash: False
+ no_root_squash: false
- client: 192.168.0.10-192.168.0.20
access: RO
- no_root_squash: False
+ no_root_squash: false
system: ibox001
user: admin
password: secret
@@ -98,8 +103,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -110,23 +113,22 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
merge_two_dicts,
)
-MUNCH_IMP_ERR = None
+HAS_MUNCH = True
try:
from munch import unmunchify
- HAS_MUNCH = True
except ImportError:
HAS_MUNCH = False
- MUNCH_IMPORT_ERROR = traceback.format_exc()
def transform(d):
+ """ Create a frozen set from a normal set's items """
return frozenset(d.items())
def create_export(module, export, filesystem, system):
- """ Create new filesystem or update existing one"""
+ """ Create new export """
if export:
- raise AssertionError("Export {0} already exists".format(export.get_name()))
+ raise AssertionError(f"Export {export.get_name()} already exists")
changed = False
name = module.params['name']
@@ -141,14 +143,13 @@ def create_export(module, export, filesystem, system):
@api_wrapper
-def update_export(module, export, filesystem, system):
- """ Create new filesystem or update existing one"""
+def update_export(module, export):
+ """ Update existing export """
if not export:
- raise AssertionError("Export {0} does not exist and cannot be updated".format(export.get_name()))
+ raise AssertionError(f"Export {export.get_name()} does not exist and cannot be updated")
changed = False
- name = module.params['name']
client_list = module.params['client_list']
if client_list:
@@ -164,21 +165,15 @@ def update_export(module, export, filesystem, system):
@api_wrapper
def delete_export(module, export):
- """ Delete file system"""
+ """ Delete export """
if not module.check_mode:
export.delete()
changed = True
return changed
-def get_sys_exp_fs(module):
- system = get_system(module)
- filesystem = get_filesystem(module, system)
- export = get_export(module, system)
- return (system, export, filesystem)
-
-
def get_export_fields(export):
+ """ Return export fields dict """
fields = export.get_fields() # from_cache=True, raw_value=True)
export_id = fields.get('id', None)
permissions = fields.get('permissions', None)
@@ -192,15 +187,13 @@ def get_export_fields(export):
def handle_stat(module):
- """
- Gather stats on export and return. Changed is always False.
- """
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Gather stats on export and return. Changed is always False. """
+ name = module.params['name']
+ filesystem_name = module.params['filesystem']
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- module.fail_json(msg='Export "{0}" of file system "{1}" not found'.format(
- module.params['name'],
- module.params['filesystem'],
- ))
+ module.fail_json(msg=f"Export '{name}' of file system '{filesystem_name}' not found")
field_dict = get_export_fields(export)
result = dict(
@@ -212,30 +205,38 @@ def handle_stat(module):
def handle_present(module):
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Handle present state """
+ system = get_system(module)
+ filesystem = get_filesystem(module, system)
+ export = get_export(module, system)
+ filesystem_name = module.params['filesystem']
if not filesystem:
- module.fail_json(msg='File system {0} not found'.format(module.params['filesystem']))
+ module.fail_json(msg=f'File system {filesystem_name} not found')
elif not export:
changed = create_export(module, export, filesystem, system)
module.exit_json(changed=changed, msg="File system export created")
else:
- changed = update_export(module, export, filesystem, system)
+ changed = update_export(module, export)
module.exit_json(changed=changed, msg="File system export updated")
def handle_absent(module):
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Handle absent state """
+ system = get_system(module)
+ export = get_export(module, system)
+ filesystem_name = module.params['filesystem']
if not export:
changed = False
- msg = "Export of {0} already absent".format(module.params['filesystem'])
+ msg = "Export of {filesystem_name} already absent"
module.exit_json(changed=changed, msg=msg)
else:
changed = delete_export(module, export)
- msg = "Export of {0} deleted".format(module.params['filesystem'])
+ msg = f"Export of {filesystem_name} deleted"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute states """
state = module.params['state']
try:
if state == 'stat':
@@ -245,13 +246,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
index d35705787..d1889511f 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+""" Manage Infinibox export clients """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position, wrong-import-order
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -58,7 +63,7 @@ EXAMPLES = r'''
infini_export_client:
client: 10.0.0.1
access_mode: RW
- no_root_squash: yes
+ no_root_squash: true
export: /data
state: present # Default
user: admin
@@ -69,7 +74,7 @@ EXAMPLES = r'''
infini_export_client:
client: "{{ item }}"
access_mode: RO
- no_root_squash: no
+ no_root_squash: false
export: /data
user: admin
password: secret
@@ -94,7 +99,7 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
merge_two_dicts,
)
-MUNCH_IMP_ERR = None
+MUNCH_IMPORT_ERROR = None
try:
from munch import Munch, unmunchify
HAS_MUNCH = True
@@ -145,7 +150,7 @@ def update_client(module, export):
@api_wrapper
def delete_client(module, export):
- """Update export client list"""
+ """delete export client from client list"""
if export is None and module.params['state'] == 'absent':
module.exit_json(changed=False)
@@ -168,13 +173,8 @@ def delete_client(module, export):
return changed
-def get_sys_exp(module):
- system = get_system(module)
- export = get_export(module, system)
- return (system, export)
-
-
def get_export_client_fields(export, client_name):
+ """ Get export client fields """
fields = export.get_fields() # from_cache=True, raw_value=True)
permissions = fields.get('permissions', None)
for munched_perm in permissions:
@@ -185,13 +185,15 @@ def get_export_client_fields(export, client_name):
no_root_squash=perm['no_root_squash'],
)
return field_dict
- raise AssertionError("No client {0} match to exports found".format(client_name))
+ raise AssertionError(f"No client {client_name} match to exports found")
def handle_stat(module):
- system, export = get_sys_exp(module)
+ """ Execute the stat state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- module.fail_json(msg='Export {0} not found'.format(module.params['export']))
+ module.fail_json(msg=f"Export {module.params['export']} not found")
client_name = module.params['client']
field_dict = get_export_client_fields(export, client_name)
result = dict(
@@ -203,9 +205,11 @@ def handle_stat(module):
def handle_present(module):
- system, export = get_sys_exp(module)
+ """ Execute the present state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- msg = 'Export {0} not found'.format(module.params['export'])
+ msg = f"Export {module.params['export']} not found"
module.fail_json(msg=msg)
changed = update_client(module, export)
@@ -214,7 +218,9 @@ def handle_present(module):
def handle_absent(module):
- system, export = get_sys_exp(module)
+ """ Execute the absent state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
changed = False
msg = "Export client already absent"
@@ -226,6 +232,7 @@ def handle_absent(module):
def execute_state(module):
+ """ Execute a state """
state = module.params['state']
try:
if state == 'stat':
@@ -235,13 +242,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py
new file mode 100644
index 000000000..f64808af9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Manage switch names on Infinibox """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_fibre_channel_switch
+version_added: 2.16.0
+short_description: Manage Infinibox FC switch names
+description:
+ - This module renames FC switch names (rename state) or shows information about FC switches (stat state)
+author: David Ohlemacher (@ohlemacher)
+options:
+ switch_name:
+ description:
+ - Current name of an existing fibre channel switch.
+ type: str
+ required: true
+ new_switch_name:
+ description:
+ - New name for an existing fibre channel switch.
+ type: str
+ required: false
+ state:
+ description:
+ - Rename an FC switch name, when using state rename.
+ - States present and absent are not implemented.
+ - State stat shows the existing FC switch details.
+ type: str
+ required: false
+ default: rename
+ choices: [ "stat", "rename" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Rename fibre channel switch
+ infini_fibre_channel_switch:
+ switch_name: VSAN 100
+ new_switch_name: VSAN 200
+ state: rename
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Get information about fibre channel switch
+ infini_fibre_channel_switch:
+ switch_name: VSAN 2000
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+def find_switch_by_name(module):
+ """ Find switch by name """
+ switch = module.params['switch_name']
+ path = f"fc/switches?name={switch}"
+ system = get_system(module)
+ try:
+ switch_result = system.api.get(path=path).get_result()
+ if not switch_result:
+ msg = f"Cannot find switch {switch}"
+ module.fail_json(msg=msg)
+ except APICommandFailed as err:
+ msg = f"Cannot find switch {switch}: {err}"
+ module.fail_json(msg=msg)
+ return switch_result[0]
+
+
+def handle_stat(module):
+ """ Handle stat state """
+ switch_name = module.params['switch_name']
+ switch_result = find_switch_by_name(module)
+ result = dict(
+ changed=False,
+ msg=f"Switch stat {switch_name} found"
+ )
+ result = merge_two_dicts(result, switch_result)
+ module.exit_json(**result)
+
+
+def handle_rename(module):
+ """ Handle rename state """
+ switch_name = module.params['switch_name']
+ new_switch_name = module.params['new_switch_name']
+
+ switch_result = find_switch_by_name(module)
+ switch_id = switch_result['id']
+
+ path = f"fc/switches/{switch_id}"
+ data = {
+ "name": new_switch_name,
+ }
+ try:
+ system = get_system(module)
+ rename_result = system.api.put(path=path, data=data).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot rename fc switch {switch_name}: {err}"
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=True,
+ msg=f"FC switch renamed from {switch_name} to {new_switch_name}"
+ )
+ result = merge_two_dicts(result, rename_result)
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "rename":
+ handle_rename(module)
+ else:
+ module.exit_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ new_switch_name = module.params["new_switch_name"]
+ state = module.params["state"]
+
+ if state in ["rename"]:
+ if not new_switch_name:
+ msg = "New switch name parameter must be provided"
+ module.fail_json(msg=msg)
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ switch_name=dict(required=True, type="str"),
+ new_switch_name=dict(required=False, type="str"),
+ state=dict(default="rename", choices=["stat", "rename"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
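The rename flow above is a GET to look up the switch id followed by a PUT of the new name. A compact sketch of the same two calls with infinisdk; switch names, hostname and credentials are placeholders:

from infinisdk import InfiniBox

system = InfiniBox("ibox001", auth=("admin", "secret"), use_ssl=True)
system.login()
try:
    switches = system.api.get(path="fc/switches?name=VSAN 100").get_result()
    if switches:
        switch_id = switches[0]["id"]
        # PUT the new name against the switch id, as handle_rename() does
        system.api.put(path=f"fc/switches/{switch_id}", data={"name": "VSAN 200"})
finally:
    system.logout()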
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
index f9cd2bd56..d4d75a07a 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
@@ -1,27 +1,77 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module manages file systems on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: infini_fs
-version_added: '2.3.0'
+version_added: 2.3.0
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: David Ohlemacher (@ohlemacher)
options:
+ fs_type:
+ description:
+ - Specifies the file system type, regular or snapshot.
+ type: str
+ required: false
+ default: master
+ choices: [ "master", "snapshot" ]
name:
description:
- File system name.
+ required: false
+ type: str
+ parent_fs_name:
+ description:
+      - Specify a file system name. This is the parent file system when creating a snapshot. Required if fs_type is 'snapshot'.
+ type: str
+ required: false
+ pool:
+ description:
+ - Pool that will host file system.
required: true
type: str
+ restore_fs_from_snapshot:
+ description:
+ - Specify true to restore a file system (parent_fs_name) from an existing snapshot specified by the name field.
+ - State must be set to present and fs_type must be 'snapshot'.
+ type: bool
+ required: false
+ default: false
+ serial:
+ description:
+ - Serial number matching an existing file system.
+ required: false
+ type: str
+ size:
+ description:
+      - File system size in MB, GB, TB or PB units. See examples.
+ required: false
+ type: str
+ snapshot_lock_expires_at:
+ description:
+ - This will cause a snapshot to be locked at the specified date-time.
+ Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ type: str
+ required: false
+ snapshot_lock_only:
+ description:
+ - This will lock an existing snapshot but will suppress refreshing the snapshot.
+ type: bool
+ required: false
+ default: false
state:
description:
- Creates/Modifies file system when present or removes when absent.
@@ -35,63 +85,96 @@ options:
required: false
default: true
type: bool
- pool:
+ write_protected:
description:
- - Pool that will host file system.
- required: true
+ - Specifies if the file system should be write protected. Default will be True for snapshots, False for master file systems.
type: str
- size:
- description:
- - File system size in MB, GB or TB units. See examples.
required: false
- type: str
+ default: "Default"
+ choices: ["Default", "True", "False"]
extends_documentation_fragment:
- infinibox
requirements:
- capacity
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
- size: 1TB
+ size: 1GB
pool: bar
thin_provision: true
state: present
user: admin
password: secret
system: ibox001
-'''
+- name: Create snapshot named foo_snap from fs named foo
+ infini_fs:
+ name: foo_snap
+ pool: bar
+ fs_type: snapshot
+ parent_fs_name: foo
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Stat snapshot, also a fs, named foo_snap
+ infini_fs:
+ name: foo_snap
+ pool: bar
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Remove snapshot, also a fs, named foo_snap
+ infini_fs:
+ name: foo_snap
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
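+# The examples below are illustrative sketches based on the options documented
+# above. The lock expiry timestamp and the fs_serial variable are placeholders
+# and the playbooks have not been validated against a live system.
+- name: Lock existing snapshot foo_snap until the given date-time
+  infini_fs:
+    name: foo_snap
+    pool: bar
+    fs_type: snapshot
+    parent_fs_name: foo
+    snapshot_lock_only: true
+    snapshot_lock_expires_at: "2025-12-31 23:59:59.000000"
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+- name: Restore file system foo from its snapshot foo_snap
+  infini_fs:
+    name: foo_snap
+    pool: bar
+    fs_type: snapshot
+    parent_fs_name: foo
+    restore_fs_from_snapshot: true
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+- name: Stat a file system by serial number rather than name
+  infini_fs:
+    serial: "{{ fs_serial }}"
+    pool: bar
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001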
+"""
# RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
+HAS_INFINISDK = True
try:
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
- HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
+ check_snapshot_lock_options,
+ get_filesystem,
+ get_fs_by_sn,
get_pool,
get_system,
- get_filesystem
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
except ModuleNotFoundError:
from infinibox import ( # Used when hacking
- HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
+ check_snapshot_lock_options,
+        get_filesystem,
+        get_fs_by_sn,
get_pool,
get_system,
- get_filesystem
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
+except ImportError:
+ HAS_INFINISDK = False
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+ from infinisdk.core.exceptions import ObjectNotFound
+except ImportError:
+ HAS_INFINISDK = False
CAPACITY_IMP_ERR = None
try:
from capacity import KiB, Capacity
+
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
@@ -99,152 +182,414 @@ except ImportError:
@api_wrapper
def create_filesystem(module, system):
- """Create Filesystem"""
- changed = True
+ """ Create Filesystem """
+    changed = True
if not module.check_mode:
- if module.params['thin_provision']:
- provisioning = 'THIN'
+ if module.params["thin_provision"]:
+ provisioning = "THIN"
else:
- provisioning = 'THICK'
+ provisioning = "THICK"
+
filesystem = system.filesystems.create(
- name=module.params['name'],
- pool=get_pool(module, system),
+ name=module.params["name"],
provtype=provisioning,
+ pool=get_pool(module, system),
)
- if module.params['size']:
- size = Capacity(module.params['size']).roundup(64 * KiB)
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
filesystem.update_size(size)
+
+ is_write_prot = filesystem.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ filesystem.update_field("write_protected", desired_is_write_prot)
+ changed = True
return changed
@api_wrapper
def update_filesystem(module, filesystem):
- """Update Filesystem"""
+ """ Update Filesystem """
changed = False
- if module.params['size']:
- size = Capacity(module.params['size']).roundup(64 * KiB)
+
+ if module.check_mode:
+ return changed
+
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
if filesystem.get_size() != size:
- if not module.check_mode:
- filesystem.update_size(size)
+ filesystem.update_size(size)
changed = True
+ if module.params["thin_provision"] is not None:
provisioning = str(filesystem.get_provisioning())
- if provisioning == 'THICK' and module.params['thin_provision']:
- if not module.check_mode:
- filesystem.update_provisioning('THIN')
+ if provisioning == "THICK" and module.params["thin_provision"]:
+ filesystem.update_provisioning("THIN")
changed = True
- if provisioning == 'THIN' and not module.params['thin_provision']:
- if not module.check_mode:
- filesystem.update_provisioning('THICK')
+ if provisioning == "THIN" and not module.params["thin_provision"]:
+ filesystem.update_provisioning("THICK")
changed = True
+
+ is_write_prot = filesystem.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ filesystem.update_field("write_protected", desired_is_write_prot)
+ changed = True
+
return changed
@api_wrapper
def delete_filesystem(module, filesystem):
- """ Delete Filesystem"""
+ """ Delete Filesystem """
+ changed = False
if not module.check_mode:
filesystem.delete()
- module.exit_json(changed=True)
+ changed = True
+ return changed
-def get_sys_pool_fs(module):
- system = get_system(module)
- pool = get_pool(module, system)
- filesystem = get_filesystem(module, system)
- return (system, pool, filesystem)
+@api_wrapper
+def create_fs_snapshot(module, system):
+ """ Create Snapshot from parent fs """
+ snapshot_name = module.params["name"]
+ parent_fs_name = module.params["parent_fs_name"]
+ changed = False
+ if not module.check_mode:
+ try:
+ parent_fs = system.filesystems.get(name=parent_fs_name)
+ except ObjectNotFound:
+ msg = f"Cannot create snapshot {snapshot_name}. Parent file system {parent_fs_name} not found"
+ module.fail_json(msg=msg)
+ if not parent_fs:
+ msg = f"Cannot find new snapshot's parent file system named {parent_fs_name}"
+ module.fail_json(msg=msg)
+ if not module.check_mode:
+ if module.params["snapshot_lock_only"]:
+ msg = "Snapshot does not exist. Cannot comply with 'snapshot_lock_only: true'."
+ module.fail_json(msg=msg)
+ check_snapshot_lock_options(module)
+ snapshot = parent_fs.create_snapshot(name=snapshot_name)
+
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ manage_snapshot_locks(module, snapshot)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_fs_snapshot(module, snapshot):
+ """ Update/refresh fs snapshot. May also lock it. """
+ refresh_changed = False
+ lock_changed = False
+ if not module.check_mode:
+ if not module.params["snapshot_lock_only"]:
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ if not snap_is_locked:
+                snapshot.refresh_snapshot()
+                refresh_changed = True
+ else:
+ msg = "File system snapshot is locked and may not be refreshed"
+ module.fail_json(msg=msg)
+
+ check_snapshot_lock_options(module)
+ lock_changed = manage_snapshot_locks(module, snapshot)
+
+ if module.params["write_protected"] is not None:
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ return refresh_changed or lock_changed
+
+
+@api_wrapper
+def find_fs_id(module, system, fs_name):
+ """ Find the ID of this fs """
+ fs_url = f"filesystems?name={fs_name}&fields=id"
+ fs = system.api.get(path=fs_url)
+
+ result = fs.get_json()["result"]
+ if len(result) != 1:
+ module.fail_json(f"Cannot find a file ststem with name '{fs_name}'")
+
+ fs_id = result[0]["id"]
+ return fs_id
+
+
+@api_wrapper
+def restore_fs_from_snapshot(module, system):
+ """ Use snapshot to restore a file system """
+ changed = False
+ is_restoring = module.params["restore_fs_from_snapshot"]
+ fs_type = module.params["fs_type"]
+ snap_name = module.params["name"]
+ snap_id = find_fs_id(module, system, snap_name)
+ parent_fs_name = module.params["parent_fs_name"]
+ parent_fs_id = find_fs_id(module, system, parent_fs_name)
+
+ # Check params
+ if not is_restoring:
+ raise AssertionError("A programming error occurred. is_restoring is not True")
+ if fs_type != "snapshot":
+ module.exit_json(msg="Cannot restore a parent file system from snapshot unless the file system type is 'snapshot'")
+ if not parent_fs_name:
+ module.exit_json(msg="Cannot restore a parent file system from snapshot unless the parent file system name is specified")
+
+ if not module.check_mode:
+ restore_url = f"filesystems/{parent_fs_id}/restore?approved=true"
+ restore_data = {
+ "source_id": snap_id,
+ }
+ try:
+ system.api.post(path=restore_url, data=restore_data)
+ changed = True
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot restore file system {parent_fs_name} from snapshot {snap_name}: {str(err)}")
+ return changed
def handle_stat(module):
- system, pool, filesystem = get_sys_pool_fs(module)
- if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ """ Handle the stat state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
+ else:
+ filesystem = get_fs_by_sn(module, system)
+ fs_type = module.params["fs_type"]
+
+ if fs_type == "master":
+ if not pool:
+ module.fail_json(msg=f"Pool {module.params['pool']} not found")
if not filesystem:
- module.fail_json(msg='File system {0} not found'.format(module.params['name']))
+ module.fail_json(msg=f"File system {module.params['name']} not found")
fields = filesystem.get_fields() # from_cache=True, raw_value=True)
+
+ created_at = str(fields.get("created_at", None))
+ filesystem_id = fields.get("id", None)
+ filesystem_type = fields.get("type", None)
+ has_children = fields.get("has_children", None)
+ lock_expires_at = str(filesystem.get_lock_expires_at())
+ lock_state = filesystem.get_lock_state()
+ mapped = str(fields.get("mapped", None))
name = fields.get("name", None)
- used = fields.get('used_size', None)
- filesystem_id = fields.get('id', None)
- provisioning = fields.get('provisioning', None)
+ parent_id = fields.get("parent_id", None)
+ provisioning = fields.get("provisioning", None)
+ serial = fields.get("serial", None)
+ size = str(filesystem.get_size())
+ updated_at = str(fields.get("updated_at", None))
+ used = str(fields.get("used_size", None))
+ write_protected = fields.get("write_protected", None)
+ if filesystem_type == "SNAPSHOT":
+ msg = "File system snapshot stat found"
+ else:
+ msg = "File system stat found"
result = dict(
changed=False,
- name=name,
- size=str(filesystem.get_size()),
- used=str(used),
+ created_at=created_at,
filesystem_id=filesystem_id,
+ filesystem_type=filesystem_type,
+ has_children=has_children,
+ lock_state=lock_state,
+ lock_expires_at=lock_expires_at,
+ mapped=mapped,
+ msg=msg,
+ name=name,
+ parent_id=parent_id,
provisioning=provisioning,
- msg='File system stat found'
+ serial=serial,
+ size=size,
+ updated_at=updated_at,
+ used=used,
+ write_protected=write_protected,
)
module.exit_json(**result)
def handle_present(module):
- system, pool, filesystem = get_sys_pool_fs(module)
- if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
- if not filesystem:
- changed = create_filesystem(module, system)
- module.exit_json(changed=changed, msg="File system created")
+ """ Handle the present state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
else:
- changed = update_filesystem(module, filesystem)
- module.exit_json(changed=changed, msg="File system updated")
+ filesystem = get_fs_by_sn(module, system)
+ fs_type = module.params["fs_type"]
+ is_restoring = module.params["restore_fs_from_snapshot"]
+ if fs_type == "master":
+ if not pool:
+ module.fail_json(msg=f"Pool {module.params['pool']} not found")
+ if not filesystem:
+ changed = create_filesystem(module, system)
+ module.exit_json(changed=changed, msg="File system created")
+ else:
+ changed = update_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system updated")
+ elif fs_type == "snapshot":
+ snapshot = filesystem
+ if is_restoring:
+ # Restore fs from snapshot
+ changed = restore_fs_from_snapshot(module, system)
+ snap_fs_name = module.params["name"]
+ parent_fs_name = module.params["parent_fs_name"]
+ msg = f"File system {parent_fs_name} restored from snapshot {snap_fs_name}"
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ if not snapshot:
+ changed = create_fs_snapshot(module, system)
+ module.exit_json(changed=changed, msg="File system snapshot created")
+ else:
+ changed = update_fs_snapshot(module, filesystem)
+ module.exit_json(changed=changed, msg="File system snapshot updated")
def handle_absent(module):
- system, pool, filesystem = get_sys_pool_fs(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
+ else:
+ filesystem = get_fs_by_sn(module, system)
+
+ if filesystem and filesystem.get_lock_state() == "LOCKED":
+ msg = "Cannot delete snapshot. Locked."
+ module.fail_json(changed=False, msg=msg)
+
if not pool or not filesystem:
module.exit_json(changed=False, msg="File system already absent")
- else:
+
+ existing_fs_type = filesystem.get_type()
+
+ if existing_fs_type == "MASTER":
changed = delete_filesystem(module, filesystem)
module.exit_json(changed=changed, msg="File system removed")
+ elif existing_fs_type == "SNAPSHOT":
+ snapshot = filesystem
+ changed = delete_filesystem(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot removed")
+ else:
+ module.fail_json(msg="A programming error has occured")
def execute_state(module):
- state = module.params['state']
+ """ Execute states """
+ state = module.params["state"]
try:
- if state == 'stat':
+ if state == "stat":
handle_stat(module)
- elif state == 'present':
+ elif state == "present":
handle_present(module)
- elif state == 'absent':
+ elif state == "absent":
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
finally:
system = get_system(module)
system.logout()
+def check_options(module):
+ """Verify module options are sane"""
+ name = module.params["name"]
+ serial = module.params["serial"]
+ state = module.params["state"]
+ size = module.params["size"]
+ pool = module.params["pool"]
+ fs_type = module.params["fs_type"]
+ parent_fs_name = module.params["parent_fs_name"]
+
+ if state == "stat":
+ if not name and not serial:
+ msg = "Name or serial parameter must be provided"
+ module.fail_json(msg=msg)
+ if state in ["present", "absent"]:
+ if not name:
+ msg = "Name parameter must be provided"
+ module.fail_json(msg=msg)
+
+ if state == "present":
+ if fs_type == "master":
+ if parent_fs_name:
+ msg = "parent_fs_name should not be specified "
+ msg += "if fs_type is 'master'. Used for snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a master file system"
+ module.fail_json(msg=msg)
+ if not pool:
+ msg = "For state 'present', pool is required"
+ module.fail_json(msg=msg)
+ elif fs_type == "snapshot":
+ if size:
+ msg = "Size should not be specified "
+ msg += "for fs_type snapshot"
+ module.fail_json(msg=msg)
+ if not parent_fs_name:
+ msg = "For state 'present' and fs_type 'snapshot', "
+ msg += "parent_fs_name is required"
+ module.fail_json(msg=msg)
+ else:
+ msg = "A programming error has occurred"
+ module.fail_json(msg=msg)
+
+
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name=dict(required=True),
- state=dict(default='present', choices=['stat', 'present', 'absent']),
+ fs_type=dict(choices=["master", "snapshot"], default="master"),
+ name=dict(required=False, default=None),
+ parent_fs_name=dict(default=None, required=False),
pool=dict(required=True),
+ restore_fs_from_snapshot=dict(default=False, type="bool"),
+ serial=dict(required=False, default=None),
size=dict(),
- thin_provision=dict(type=bool, default=True),
+ snapshot_lock_expires_at=dict(),
+ snapshot_lock_only=dict(required=False, type="bool", default=False),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ thin_provision=dict(default=True, type="bool"),
+ write_protected=dict(choices=["True", "False", "Default"], default="Default"),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if module.params["write_protected"] == "Default":
+ if module.params["fs_type"] == "master": # Use default for master fs
+ module.params["write_protected"] = False
+ else: # Use default for snapshot
+ module.params["write_protected"] = True
+ else:
+ module.params["write_protected"] = module.params["write_protected"] == "True"
+
if not HAS_INFINISDK:
- module.fail_json(msg=missing_required_lib('infinisdk'))
+ module.fail_json(msg=missing_required_lib("infinisdk"))
if not HAS_CAPACITY:
- module.fail_json(msg=missing_required_lib('capacity'))
+ module.fail_json(msg=missing_required_lib("capacity"))
- if module.params['size']:
+ if module.params["size"]:
try:
- Capacity(module.params['size'])
- except Exception:
- module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+ Capacity(module.params["size"])
+ except Exception: # pylint: disable=broad-exception-caught
+ module.fail_json(
+ msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units"
+ )
+ check_options(module)
execute_state(module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
index 68d78546e..91eeab2ee 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+""" Manage hosts on Infinibox """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_host
-version_added: '2.3.0'
+version_added: 2.3.0
short_description: Create, Delete or Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
@@ -20,10 +24,12 @@ options:
name:
description:
- Host Name
+ type: str
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
@@ -44,9 +50,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
-from infi.dtypes.iqn import make_iscsi_name
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -60,22 +63,16 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
@api_wrapper
def create_host(module, system):
-
+ """ Create a host """
changed = True
-
if not module.check_mode:
- host = system.hosts.create(name=module.params['name'])
- return changed
-
-
-@api_wrapper
-def update_host(module, host):
- changed = False
+ system.hosts.create(name=module.params['name'])
return changed
@api_wrapper
def delete_host(module, host):
+ """ Delete a host """
changed = True
if not module.check_mode:
# May raise APICommandFailed if mapped, etc.
@@ -83,13 +80,8 @@ def delete_host(module, host):
return changed
-def get_sys_host(module):
- system = get_system(module)
- host = get_host(module, system)
- return (system, host)
-
-
def get_host_fields(host):
+ """ Get host fields """
fields = host.get_fields(from_cache=True, raw_value=True)
created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
field_dict = dict(
@@ -117,45 +109,52 @@ def get_host_fields(host):
def handle_stat(module):
- system, host = get_sys_host(module)
+ """ Handle the stat state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
- module.fail_json(msg='Host {0} not found'.format(host_name))
+ module.fail_json(msg=f'Host {host_name} not found')
field_dict = get_host_fields(host)
result = dict(
changed=False,
- msg='Host stat found'
+ msg=f'Host {host_name} stat found'
)
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
def handle_present(module):
- system, host = get_sys_host(module)
+ """ Handle the present state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
changed = create_host(module, system)
- msg = 'Host {0} created'.format(host_name)
+ msg = f'Host {host_name} created'
module.exit_json(changed=changed, msg=msg)
else:
- changed = update_host(module, host)
- msg = 'Host {0} updated'.format(host_name)
+ changed = False
+ msg = f'Host {host_name} exists and does not need to be updated'
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, host = get_sys_host(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
- msg = "Host {0} already absent".format(host_name)
+ msg = f"Host {host_name} already absent"
module.exit_json(changed=False, msg=msg)
else:
changed = delete_host(module, host)
- msg = "Host {0} removed".format(host_name)
+ msg = f"Host {host_name} removed"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute a state """
state = module.params['state']
try:
if state == 'stat':
@@ -165,13 +164,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
index e3757e021..1111930a3 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies mappings on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_map
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Create and Delete mapping of a volume to a host or cluster on Infinibox
description:
- This module creates or deletes mappings of volumes to hosts or clusters
@@ -24,10 +28,12 @@ options:
host:
description:
- Host Name
+ type: str
required: false
cluster:
description:
- Cluster Name
+ type: str
required: false
state:
description:
@@ -40,10 +46,12 @@ options:
volume:
description:
- Volume name to map to the host.
+ type: str
required: true
lun:
description:
- Volume lun.
+ type: int
extends_documentation_fragment:
- infinibox
'''
@@ -86,15 +94,8 @@ EXAMPLES = r'''
password: secret
'''
-
# RETURN = r''' # '''
-import traceback
-# import sh
-
-# rescan_scsi = sh.Command("rescan-scsi-bus.sh")
-# rescan_scsi_remove = rescan_scsi.bake("--remove")
-
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
@@ -102,7 +103,6 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
api_wrapper,
get_cluster,
get_host,
- get_pool,
get_system,
get_volume,
infinibox_argument_spec,
@@ -110,39 +110,31 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
)
try:
- from infinisdk.core.exceptions import APICommandFailed, ObjectNotFound
+ from infinisdk.core.exceptions import APICommandFailed
except ImportError:
pass # Handled by HAS_INFINISDK from module_utils
def vol_is_mapped_to_host(volume, host):
- volume_fields = volume.get_fields()
- volume_id = volume_fields.get('id')
+ """ Return a bool showing if a vol is mapped to a host """
host_luns = host.get_luns()
- # print('volume id: {0}'.format(volume_id))
- # print('host luns: {0}'.format(str(host_luns)))
for lun in host_luns:
if lun.volume == volume:
- # print('found mapped volume: {0}'.format(volume))
return True
return False
def vol_is_mapped_to_cluster(volume, cluster):
- volume_fields = volume.get_fields()
- volume_id = volume_fields.get('id')
+ """ Return a bool showing if a vol is mapped to a cluster """
cluster_luns = cluster.get_luns()
- # print('volume id: {0}'.format(volume_id))
- # print('host luns: {0}'.format(str(host_luns)))
-
for lun in cluster_luns:
if lun.volume == volume:
- # print('found mapped volume: {0}'.format(volume))
return True
return False
def find_host_lun_use(module, host, volume):
+ """ Return a dict showing if a host lun matches a volume. """
check_result = {'lun_used': False, 'lun_volume_matches': False}
desired_lun = module.params['lun']
@@ -158,12 +150,13 @@ def find_host_lun_use(module, host, volume):
def find_cluster_lun_use(module, cluster, volume):
+ """ Return a dict showing if a cluster lun matches a volume. """
check_result = {'lun_used': False, 'lun_volume_matches': False}
desired_lun = module.params['lun']
if desired_lun:
for cluster_lun in cluster.get_luns():
- if desired_lun == cluster.lun:
+ if desired_lun == cluster_lun:
if cluster.volume == volume:
check_result = {'lun_used': True, 'lun_volume_matches': True}
else:
@@ -173,6 +166,7 @@ def find_cluster_lun_use(module, cluster, volume):
def find_host_lun(host, volume):
+ """ Find a hosts lun """
found_lun = None
luns = host.get_luns()
@@ -183,6 +177,7 @@ def find_host_lun(host, volume):
def find_cluster_lun(cluster, volume):
+ """ Find a cluster's LUN """
found_lun = None
luns = cluster.get_luns()
@@ -194,12 +189,8 @@ def find_cluster_lun(cluster, volume):
@api_wrapper
def create_mapping(module, system):
- """
- Create mapping of volume to host or cluster. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to host or cluster. If already mapped, exit_json with changed False. """
- host_name = module.params['host']
- cluster_name = module.params['cluster']
host = get_host(module, system)
cluster = get_cluster(module, system)
@@ -221,20 +212,18 @@ def create_mapping(module, system):
@api_wrapper
def create_mapping_to_cluster(module, system):
- """
- Create mapping of volume to cluster. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to cluster. If already mapped, exit_json with changed False. """
changed = False
cluster = get_cluster(module, system)
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
+ lun_name = module.params['lun']
lun_use = find_cluster_lun_use(module, cluster, volume)
if lun_use['lun_used']:
- msg = "Cannot create mapping of volume '{0}' to cluster '{1}' using lun '{2}'. Lun in use.".format(
- volume.get_name(),
- cluster.get_name(),
- module.params['lun'])
+ msg = f"Cannot create mapping of volume '{volume_name}' to cluster '{cluster_name}' using lun '{lun_name}'. Lun in use."
module.fail_json(msg=msg)
try:
@@ -244,30 +233,26 @@ def create_mapping_to_cluster(module, system):
changed = True
except APICommandFailed as err:
if "is already mapped" not in str(err):
- module.fail_json('Cannot map volume {0} to cluster {1}: {2}. Already mapped.'.format(
- module.params['volume'],
- module.params['cluster'],
- str(err)))
+ msg = f"Cannot map volume '{volume_name}' to cluster '{cluster_name}': {str(err)}. Already mapped."
+ module.fail_json(msg=msg)
return changed
@api_wrapper
def create_mapping_to_host(module, system):
- """
- Create mapping of volume to host. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to host. If already mapped, exit_json with changed False. """
changed = False
host = system.hosts.get(name=module.params['host'])
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ lun_name = module.params['lun']
lun_use = find_host_lun_use(module, host, volume)
if lun_use['lun_used']:
- msg = "Cannot create mapping of volume '{0}' to host '{1}' using lun '{2}'. Lun in use.".format(
- volume.get_name(),
- host.get_name(),
- module.params['lun'])
+ msg = f"Cannot create mapping of volume '{volume_name}' to host '{host_name}' using lun '{lun_name}'. Lun in use."
module.fail_json(msg=msg)
try:
@@ -277,35 +262,29 @@ def create_mapping_to_host(module, system):
changed = True
except APICommandFailed as err:
if "is already mapped" not in str(err):
- module.fail_json('Cannot map volume {0} to host {1}: {2}. Already mapped.'.format(
- module.params['volume'],
- module.params['host'],
- str(err)))
+ msg = f"Cannot map volume '{host_name}' to host '{host_name}': {str(err)}. Already mapped."
+ module.fail_json(msg=msg)
return changed
@api_wrapper
def update_mapping_to_host(module, system):
+ """ Update a mapping to a host """
host = get_host(module, system)
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
desired_lun = module.params['lun']
if not vol_is_mapped_to_host(volume, host):
- msg = "Volume {0} is not mapped to host {1}".format(
- volume.get_name(),
- host.get_name(),
- )
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
module.fail_json(msg=msg)
if desired_lun:
found_lun = find_host_lun(host, volume)
if found_lun != desired_lun:
- msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to host '{3}'".format(
- found_lun,
- desired_lun,
- volume.get_name(),
- host.get_name())
+ msg = f"Cannot change the lun from '{found_lun}' to '{desired_lun}' for existing mapping of volume '{volume_name}' to host '{host_name}'"
module.fail_json(msg=msg)
changed = False
@@ -314,25 +293,21 @@ def update_mapping_to_host(module, system):
@api_wrapper
def update_mapping_to_cluster(module, system):
+ """ Update a mapping to a cluster """
cluster = get_cluster(module, system)
volume = get_volume(module, system)
desired_lun = module.params['lun']
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
if not vol_is_mapped_to_cluster(volume, cluster):
- msg = "Volume {0} is not mapped to cluster {1}".format(
- volume.get_name(),
- cluster.get_name(),
- )
+ msg = f"Volume {volume_name} is not mapped to cluster {cluster_name}"
module.fail_json(msg=msg)
if desired_lun:
found_lun = find_cluster_lun(cluster, volume)
if found_lun != desired_lun:
- msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to cluster '{3}'".format(
- found_lun,
- desired_lun,
- volume.get_name(),
- cluster.get_name())
+ msg = f"Cannot change the lun from '{found_lun}' to '{desired_lun}' for existing mapping of volume '{volume_name}' to cluster '{cluster_name}'"
module.fail_json(msg=msg)
changed = False
@@ -341,6 +316,7 @@ def update_mapping_to_cluster(module, system):
@api_wrapper
def delete_mapping(module, system):
+ """ Delete a mapping """
host = get_host(module, system)
cluster = get_cluster(module, system)
if host:
@@ -373,34 +349,22 @@ def delete_mapping_to_host(module, system):
if not module.check_mode:
volume = get_volume(module, system)
host = get_host(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
if volume and host:
try:
existing_lun = find_host_lun(host, volume)
host.unmap_volume(volume)
changed = True
- msg = "Volume '{0}' was unmapped from host '{1}' freeing lun '{2}'".format(
- module.params['volume'],
- module.params['host'],
- existing_lun,
- )
-
+ msg = f"Volume '{volume_name}' was unmapped from host '{host_name}' freeing lun '{existing_lun}'"
except KeyError as err:
if 'has no logical units' not in str(err):
- module.fail_json('Cannot unmap volume {0} from host {1}: {2}'.format(
- module.params['volume'],
- module.params['host'],
- str(err)))
+ module.fail_json(f"Cannot unmap volume '{volume_name}' from host '{host_name}': {str(err)}")
else:
- msg = "Volume {0} was not mapped to host {1} and so unmapping was not executed".format(
- module.params['volume'],
- module.params['host'],
- )
+ msg = f"Volume '{volume_name}' was not mapped to host '{host_name}' and so unmapping was not executed"
else:
- msg = "Either volume {0} or host {1} does not exist. Unmapping was not executed".format(
- module.params['volume'],
- module.params['host'],
- )
+ msg = f"Either volume '{volume_name}' or host '{host_name}' does not exist. Unmapping was not executed"
else: # check_mode
changed = True
@@ -421,55 +385,31 @@ def delete_mapping_to_cluster(module, system):
if not module.check_mode:
volume = get_volume(module, system)
cluster = get_cluster(module, system)
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
if volume and cluster:
try:
existing_lun = find_cluster_lun(cluster, volume)
cluster.unmap_volume(volume)
changed = True
- msg = "Volume '{0}' was unmapped from cluster '{1}' freeing lun '{2}'".format(
- module.params['volume'],
- module.params['cluster'],
- existing_lun,
- )
+ msg = f"Volume '{volume_name}' was unmapped from cluster '{cluster_name}' freeing lun '{existing_lun}'"
except KeyError as err:
if 'has no logical units' not in str(err):
- module.fail_json('Cannot unmap volume {0} from cluster {1}: {2}'.format(
- module.params['volume'],
- module.params['cluster'],
- str(err)))
+ msg = f"Cannot unmap volume '{volume_name}' from cluster '{cluster_name}': {str(err)}"
+ module.fail_json(msg=msg)
else:
- msg = "Volume {0} was not mapped to cluster {1} and so unmapping was not executed".format(
- module.params['volume'],
- module.params['cluster'],
- )
+ msg = f"Volume '{volume_name}' was not mapped to cluster '{cluster_name}' and so unmapping was not executed"
else:
- msg = "Either volume {0} or cluster {1} does not exist. Unmapping was not executed".format(
- module.params['volume'],
- module.params['cluster'],
- )
+ msg = f"Either volume '{volume_name}' or cluster '{cluster_name}' does not exist. Unmapping was not executed"
else: # check_mode
changed = True
module.exit_json(msg=msg, changed=changed)
-def get_sys_vol_host_cluster(module):
- system = get_system(module)
- volume = get_volume(module, system)
- host = get_host(module, system)
- cluster = get_cluster(module, system)
- return (system, volume, host, cluster)
-
-
-def get_sys_vol_cluster(module):
- system = get_system(module)
- volume = get_volume(module, system)
- cluster = get_cluster(module, system)
- return (system, volume, cluster)
-
-
def get_mapping_fields(volume, host_or_cluster):
+ """ Get mapping fields """
luns = host_or_cluster.get_luns()
for lun in luns:
if volume.get_name() == lun.volume.get_name():
@@ -481,50 +421,56 @@ def get_mapping_fields(volume, host_or_cluster):
def handle_stat(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Return mapping stat """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
- if not host_name:
- host_name = "not specified"
-
cluster_name = module.params['cluster']
- if not cluster_name:
- cluster_name = "not specified"
if not volume:
- module.fail_json(msg='Volume {0} not found'.format(volume_name))
+ module.fail_json(msg=f"Volume '{volume_name}' not found")
+
if not host and not cluster:
- module.fail_json(msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ msg = f"Neither host '{host_name}' nor cluster '{cluster_name}' found"
+ module.fail_json(msg=msg)
+
if (not host or not vol_is_mapped_to_host(volume, host)) \
and (not cluster or not vol_is_mapped_to_cluster(volume, cluster)):
- msg = 'Volume {0} is mapped to neither host {1} nor cluster {2}'.format(volume_name, host_name, cluster_name)
+ if host_name:
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
+ elif cluster_name:
+ msg = f"Volume '{volume_name}' is not mapped to cluster '{cluster_name}'"
module.fail_json(msg=msg)
- if host:
+
+ if host and host_name:
found_lun = find_host_lun(host, volume)
field_dict = get_mapping_fields(volume, host)
if found_lun is not None:
- msg = 'Volume {0} is mapped to host {1} using lun {2}'.format(volume_name, host_name, found_lun),
+ msg = f"Volume '{volume_name}' is mapped to host '{host_name}' using lun '{found_lun}'"
result = dict(
changed=False,
volume_lun=found_lun,
msg=msg,
)
else:
- msg = 'Volume {0} is not mapped to host {1}'.format(volume_name, host_name)
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
module.fail_json(msg=msg)
- elif cluster:
+ elif cluster and cluster_name:
found_lun = find_cluster_lun(cluster, volume)
field_dict = get_mapping_fields(volume, cluster)
if found_lun is not None:
- msg = 'Volume {0} is mapped to cluster {1} using lun {2}'.format(volume_name, cluster_name, found_lun)
+ msg = f"Volume '{volume_name}' is mapped to cluster '{cluster_name}' using lun '{found_lun}'"
result = dict(
changed=False,
volume_lun=found_lun,
msg=msg,
)
else:
- msg = 'Volume {0} is not mapped to cluster {1}'.format(volume_name, cluster_name)
+ msg = f"Volume '{volume_name}' is not mapped to cluster '{cluster_name}'"
module.fail_json(msg=msg)
else:
msg = 'A programming error has occurred in handle_stat()'
@@ -534,50 +480,38 @@ def handle_stat(module):
def handle_present(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Create or update mapping """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
cluster_name = module.params['cluster']
if not volume:
- module.fail_json(changed=False, msg='Volume {0} not found'.format(volume_name))
+ module.fail_json(changed=False, msg=f"Volume '{volume_name}' not found")
if not host and not cluster:
if not host_name:
host_name = "not specified"
if not cluster_name:
cluster_name = "not specified"
- module.fail_json(changed=False, msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ module.fail_json(changed=False, msg=f"Neither host '{host_name}' nor cluster '{cluster_name}' found")
if host:
if not vol_is_mapped_to_host(volume, host):
changed = create_mapping(module, system)
- # TODO: Why is find_host_lun() returning None after creating the mapping?
- # host.get_luns() returns an empty list, why?
- # existing_lun = find_host_lun(host, volume)
- # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
- # volume.get_name(),
- # host.get_name(),
- # existing_lun,
- # )
- msg = "Volume '{0}' map to host '{1}' created".format(volume_name, host_name)
+ msg = f"Volume '{volume_name}' map to host '{host_name}' created"
else:
changed = update_mapping_to_host(module, system)
existing_lun = find_host_lun(host, volume)
- msg = "Volume '{0}' map to host '{1}' already exists using lun '{2}'".format(volume_name, host_name, existing_lun)
+ msg = f"Volume '{volume_name}' map to host '{host_name}' already exists using lun '{existing_lun}'"
elif cluster:
if not vol_is_mapped_to_cluster(volume, cluster):
changed = create_mapping(module, system)
- # TODO: Why is find_host_lun() returning None after creating the mapping?
- # host.get_luns() returns an empty list, why?
- # existing_lun = find_host_lun(host, volume)
- # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
- # volume.get_name(),
- # host.get_name(),
- # existing_lun,
- # )
- msg = "Volume '{0}' map to cluster '{1}' created".format(volume_name, cluster_name)
+ msg = f"Volume '{volume_name}' map to cluster '{cluster_name}' created"
else:
changed = update_mapping_to_cluster(module, system)
existing_lun = find_cluster_lun(cluster, volume)
- msg = "Volume '{0}' map to cluster '{1}' already exists using lun '{2}'".format(volume_name, cluster_name, existing_lun)
+ msg = f"Volume '{volume_name}' map to cluster '{cluster_name}' already exists using lun '{existing_lun}'"
result = dict(
changed=changed,
@@ -587,18 +521,23 @@ def handle_present(module):
def handle_absent(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Remove mapping """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
cluster_name = module.params['cluster']
if not volume or (not host and not cluster):
- module.exit_json(changed=False, msg='Mapping of volume {0} to host {1} or cluster {2} already absent'.format(volume_name, host_name, cluster_name))
+ module.exit_json(changed=False, msg=f'Mapping of volume {volume_name} to host {host_name} or cluster {cluster_name} already absent')
else:
changed = delete_mapping(module, system)
module.exit_json(changed=changed, msg="Mapping removed")
def execute_state(module):
+ """Determine which state function to execute and do so"""
state = module.params['state']
try:
if state == 'stat':
@@ -608,14 +547,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def check_parameters(module):
- volume_name = module.params['volume']
+ """Verify module options are sane"""
host_name = module.params['host']
cluster_name = module.params['cluster']
if host_name and cluster_name:
@@ -628,17 +567,15 @@ def check_parameters(module):
def main():
- """
- Gather auguments and manage mapping of vols to hosts.
- """
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- host=dict(required=False, default=""),
- cluster=dict(required=False, default=""),
+ host=dict(required=False, default=None),
+ cluster=dict(required=False, default=None),
state=dict(default='present', choices=['stat', 'present', 'absent']),
volume=dict(required=True),
- lun=dict(type=int),
+ lun=dict(type="int"),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py
new file mode 100644
index 000000000..15374a52e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_metadata
+version_added: 2.13.0
+short_description: Create, Delete or Modify metadata on Infinibox
+description:
+ - This module creates, deletes or modifies metadata on Infinibox.
+ - Deleting metadata by object, without specifying a key, is not implemented for any object_type (e.g. DELETE api/rest/metadata/system).
+    - This would delete all metadata belonging to the object. Instead, delete each key explicitly using its key name.
+author: David Ohlemacher (@ohlemacher)
+options:
+ object_type:
+ description:
+ - Type of object
+ type: str
+ required: true
+ choices: ["cluster", "fs", "fs-snap", "host", "pool", "system", "vol", "vol-snap"]
+ object_name:
+ description:
+ - Name of the object. Not used if object_type is system
+ type: str
+ required: false
+ key:
+ description:
+ - Name of the metadata key
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the metadata key
+ type: str
+ required: false
+ state:
+ description:
+ - Creates/Modifies metadata when present or removes when absent.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create new metadata key foo with value bar
+  infini_metadata:
+    object_type: system
+    key: foo
+    value: bar
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+- name: Stat metadata key named foo
+  infini_metadata:
+    object_type: system
+    key: foo
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001
+- name: Remove metadata key named foo
+  infini_metadata:
+    object_type: system
+    key: foo
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001
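+# The examples below are illustrative sketches showing object-scoped metadata;
+# the object, key and value names are placeholders.
+- name: Create metadata key extra_info with value 42 on volume vol_foo
+  infini_metadata:
+    object_type: vol
+    object_name: vol_foo
+    key: extra_info
+    value: "42"
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+- name: Remove metadata key extra_info from volume vol_foo
+  infini_metadata:
+    object_type: vol
+    object_name: vol_foo
+    key: extra_info
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001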
+"""
+
+# RETURN = r''' # '''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ get_cluster,
+ get_filesystem,
+ get_host,
+ get_pool,
+ get_system,
+ get_volume,
+ infinibox_argument_spec,
+)
+
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_INFINISDK = False
+
+HAS_CAPACITY = False
+
+
+@api_wrapper
+def get_metadata_vol(module, disable_fail):
+ """ Get metadata about a volume """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ vol = get_volume(module, system)
+ if vol:
+ path = f"metadata/{vol.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Volume {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Volume with object name {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_fs(module, disable_fail):
+ """ Get metadata about a fs """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ fs = get_filesystem(module, system)
+ if fs:
+ path = f"metadata/{fs.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"File system {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"File system named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_host(module, disable_fail):
+ """ Get metadata about a host """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ host = get_host(module, system)
+ if host:
+ path = f"metadata/{host.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Host {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Host named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_cluster(module, disable_fail):
+ """ Get metadata about a cluster """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ cluster = get_cluster(module, system)
+ if cluster:
+ path = f"metadata/{cluster.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Cluster {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Cluster named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_fssnap(module, disable_fail):
+ """ Get metadata about a fs snapshot """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ fssnap = get_filesystem(module, system)
+ if fssnap:
+ path = f"metadata/{fssnap.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"File system snapshot {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"File system snapshot named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_pool(module, disable_fail):
+ """ Get metadata about a pool """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ pool = get_pool(module, system)
+ if pool:
+ path = f"metadata/{pool.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Pool {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Pool named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_volsnap(module, disable_fail):
+ """ Get metadata for a volume snapshot """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ volsnap = get_volume(module, system)
+ if volsnap:
+ path = f"metadata/{volsnap.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Volume snapshot {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Volume snapshot named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata(module, disable_fail=False):
+ """
+ Find and return metadata
+ Use disable_fail when we are looking for metadata
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+
+ if object_type == "system":
+ path = f"metadata/{object_type}?key={key}"
+ metadata = system.api.get(path=path)
+ elif object_type == "fs":
+ metadata = get_metadata_fs(module, disable_fail)
+ elif object_type == "vol":
+ metadata = get_metadata_vol(module, disable_fail)
+ elif object_type == "host":
+ metadata = get_metadata_host(module, disable_fail)
+ elif object_type == "cluster":
+ metadata = get_metadata_cluster(module, disable_fail)
+ elif object_type == "fs-snap":
+ metadata = get_metadata_fs(module, disable_fail)
+ elif object_type == "pool":
+ metadata = get_metadata_pool(module, disable_fail)
+ elif object_type == "vol-snap":
+ metadata = get_metadata_volsnap(module, disable_fail)
+
+ else:
+ msg = f"Metadata for {object_type} not supported. Cannot stat."
+ module.fail_json(msg=msg)
+
+ if metadata:
+ result = metadata.get_result()
+ if not disable_fail and not result:
+ msg = f"Metadata for {object_type} with key {key} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ return result
+
+ if disable_fail:
+ return None
+
+ msg = f"Metadata for {object_type} named {object_name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ return None # Quiet pylint
+
+
+@api_wrapper
+def put_metadata(module): # pylint: disable=too-many-statements
+ """Create metadata key with a value. The changed variable is found elsewhere."""
+ system = get_system(module)
+
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ value = module.params["value"]
+
+ # Could check metadata value size < 32k
+
+ if object_type == "system":
+ path = "metadata/system"
+ elif object_type == "vol":
+ vol = get_volume(module, system)
+ if not vol:
+ object_name = module.params["object_name"]
+ msg = f"Volume {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{vol.id}"
+ elif object_type == "fs":
+ fs = get_filesystem(module, system)
+ if not fs:
+ object_name = module.params["object_name"]
+ msg = f"File system {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{fs.id}"
+ elif object_type == "host":
+ host = get_host(module, system)
+ if not host:
+ object_name = module.params["object_name"]
+ msg = f"Cluster {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{host.id}"
+ elif object_type == "cluster":
+ cluster = get_cluster(module, system)
+ if not cluster:
+ object_name = module.params["object_name"]
+ msg = f"Cluster {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{cluster.id}"
+ elif object_type == "fs-snap":
+ fssnap = get_filesystem(module, system)
+ if not fssnap:
+ object_name = module.params["object_name"]
+ msg = f"File system snapshot {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{fssnap.id}"
+ elif object_type == "pool":
+ pool = get_pool(module, system)
+ if not pool:
+ object_name = module.params["object_name"]
+ msg = f"Pool {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{pool.id}"
+ elif object_type == "vol-snap":
+ volsnap = get_volume(module, system)
+ if not volsnap:
+ object_name = module.params["object_name"]
+ msg = f"Volume snapshot {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{volsnap.id}"
+
+ # Create json data
+ data = {
+ key: value
+ }
+
+ # Put
+ system.api.put(path=path, data=data)
+ # Variable 'changed' not returned by design
+
+
+@api_wrapper
+def delete_metadata(module): # pylint: disable=too-many-return-statements
+ """
+ Remove metadata key.
+ Not implemented by design: Deleting all of the system's metadata
+ using 'DELETE api/rest/metadata/system'.
+ """
+ system = get_system(module)
+ changed = False
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ if object_type == "system":
+ path = f"metadata/system/{key}"
+ elif object_type == "vol":
+ vol = get_volume(module, system)
+ if not vol:
+ changed = False
+ return changed # No vol therefore no metadata to delete
+ path = f"metadata/{vol.id}/{key}"
+ elif object_type == "fs":
+ fs = get_filesystem(module, system)
+ if not fs:
+ changed = False
+ return changed # No fs therefore no metadata to delete
+ path = f"metadata/{fs.id}/{key}"
+ elif object_type == "host":
+ host = get_host(module, system)
+ if not host:
+ changed = False
+ return changed # No host therefore no metadata to delete
+ path = f"metadata/{host.id}/{key}"
+ elif object_type == "cluster":
+ cluster = get_cluster(module, system)
+ if not cluster:
+ changed = False
+ return changed # No cluster therefore no metadata to delete
+ path = f"metadata/{cluster.id}/{key}"
+ elif object_type == "fs-snap":
+ fssnap = get_filesystem(module, system)
+ if not fssnap:
+ changed = False
+ return changed # No fssnap therefore no metadata to delete
+ path = f"metadata/{fssnap.id}/{key}"
+ elif object_type == "pool":
+ pool = get_pool(module, system)
+ if not pool:
+ changed = False
+ return changed # No pool therefore no metadata to delete
+ path = f"metadata/{pool.id}/{key}"
+ elif object_type == "vol-snap":
+ volsnap = get_volume(module, system)
+ if not volsnap:
+ changed = False
+ return changed # No volsnap therefore no metadata to delete
+ path = f"metadata/{volsnap.id}/{key}"
+ else:
+ module.fail_json(f"Object type {object_type} not supported")
+
+ try:
+ system.api.delete(path=path)
+ changed = True
+ except APICommandFailed as err:
+ if err.status_code != 404:
+ raise
+ return changed
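+
+# Note on behaviour (derived from the code above, not an added feature): a 404 from
+# the DELETE call is swallowed, so repeated runs with state=absent stay idempotent
+# and simply report changed=False once the key is already gone.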
+
+
+def handle_stat(module):
+ """Return metadata stat"""
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ metadata = get_metadata(module)
+ if object_type == "system":
+ metadata_id = metadata[0]["id"]
+ object_id = metadata[0]["object_id"]
+ value = metadata[0]["value"]
+ else:
+ metadata_id = metadata["id"]
+ object_id = metadata["object_id"]
+ value = metadata["value"]
+
+ result = {
+ "msg": "Metadata found",
+ "changed": False,
+ "object_type": object_type,
+ "key": key,
+ "id": metadata_id,
+ "object_id": object_id,
+ "value": value,
+ }
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """Make metadata present"""
+ changed = False
+ msg = "Metadata unchanged"
+ if not module.check_mode:
+ old_metadata = get_metadata(module, disable_fail=True)
+ put_metadata(module)
+ new_metadata = get_metadata(module)
+ changed = new_metadata != old_metadata
+ if changed:
+ msg = "Metadata changed"
+ else:
+ msg = "Metadata unchanged since the value is the same as the existing metadata"
+ module.exit_json(changed=changed, msg=msg)
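+
+# Illustrative usage sketch (not taken from the module's EXAMPLES; parameter names
+# follow the argument_spec in main() below, connection options come from the
+# infinibox documentation fragment):
+#   - name: Set a system metadata key
+#     infini_metadata:
+#       object_type: system
+#       key: ui-dataset-default-provisioning
+#       value: THICK
+#       state: present
+#       user: admin
+#       password: secret
+#       system: ibox001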
+
+
+def handle_absent(module):
+ """Make metadata absent"""
+ msg = "Metadata unchanged"
+ changed = False
+ if not module.check_mode:
+ changed = delete_metadata(module)
+ if changed:
+ msg = "Metadata removed"
+ else:
+ msg = "Metadata did not exist so no removal was necessary"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params["state"]
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+
+ # Check object_type
+ object_types = [
+ "cluster",
+ "fs",
+ "fs-snap",
+ "host",
+ "pool",
+ "system",
+ "vol",
+ "vol-snap",
+ ]
+ if object_type not in object_types:
+ module.fail_json(
+ f"Cannot create {object_type} metadata. Object type must be one of {object_types}"
+ )
+
+ # Check object_name
+ if object_type == "system":
+ if object_name:
+ module.fail_json("An object_name for object_type system must not be provided.")
+ else:
+ if not object_name:
+ module.fail_json(
+ f"The name of the {object_type} must be provided as object_name."
+ )
+
+ key = module.params["key"]
+ if not key:
+ module.fail_json(f"Cannot create a {object_type} metadata key without providing a key name")
+
+ if state == "stat":
+ pass
+ elif state == "present":
+ # Check value
+ key = module.params["key"]
+ value = module.params["value"]
+ if not value:
+ module.fail_json(
+ f"Cannot create a {object_type} metadata key {key} without providing a value"
+ )
+ # Check system object_type
+ if object_type == "system":
+ if key == "ui-dataset-default-provisioning":
+ values = ["THICK", "THIN"]
+ if value not in values:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be one of {values}. Invalid value: {value}."
+ )
+
+ # Convert bool string to bool
+ if key in [
+ "ui-dataset-base2-units",
+ "ui-feedback-dialog",
+ "ui-feedback-form",
+ ]:
+ try:
+ module.params["value"] = json.loads(value.lower())
+ except json.decoder.JSONDecodeError:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be able to be decoded as a boolean. Invalid value: {value}."
+ )
+
+ # Convert integer string to int
+ if key in [
+ "ui-bulk-volume-zero-padding",
+ "ui-table-export-limit"
+ ]:
+ try:
+ module.params["value"] = json.loads(value.lower())
+ except json.decoder.JSONDecodeError:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be of type integer. Invalid value: {value}."
+ )
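+
+            # Illustrative behaviour (editorial sketch, not part of the original code):
+            # json.loads on the lowered string yields native Python types, e.g.
+            # json.loads("true") -> True and json.loads("128") -> 128, while
+            # json.loads("abc") raises json.decoder.JSONDecodeError and lands in the
+            # failure paths above.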
+
+ elif state == "absent":
+ pass
+ else:
+ module.fail_json(f"Invalid state '{state}' provided")
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "object_type": {"required": True, "choices": ["cluster", "fs", "fs-snap", "host", "pool", "system", "vol", "vol-snap"]},
+ "object_name": {"required": False, "default": None},
+ "key": {"required": True, "no_log": False},
+ "value": {"required": False, "default": None},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
index 013d86e5e..f9f02e11d 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
@@ -1,9 +1,13 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""This module creates, deletes or modifies network spaces on Infinibox."""
+
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
@@ -20,25 +24,35 @@ options:
name:
description:
- Network space name
+ type: str
required: true
state:
description:
- Creates/Modifies network spaces when present. Removes when absent. Shows status when stat.
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
interfaces:
description:
- - A list of interfaces for the space.
+ - A list of interface IDs for the space.
required: false
type: list
- elements: str
+ elements: int
+ default: []
+ network_config:
+ description:
+ - A network description.
+ type: dict
+ default: {}
+ required: false
service:
description:
- Choose a service.
+ type: str
required: false
- default: "replication"
- choices: ["replication", "NAS", "iSCSI"]
+ default: "RMR_SERVICE"
+ choices: ["RMR_SERVICE", "NAS_SERVICE", "ISCSI_SERVICE"]
mtu:
description:
- Set an MTU. If not specified, defaults to 1500 bytes.
@@ -54,6 +68,11 @@ options:
- Network mask.
required: false
type: int
+ default_gateway:
+ description:
+ - Default gateway.
+ type: str
+ required: false
ips:
description:
- List of IPs.
@@ -69,7 +88,12 @@ options:
- It does not affect sync-replication or active-active traffic.
required: false
type: int
-
+ async_only:
+ description:
+ - Run asynchronously only.
+ required: false
+ type: bool
+ default: false
extends_documentation_fragment:
- infinibox
'''
@@ -80,20 +104,20 @@ EXAMPLES = r'''
name: iSCSI
state: present
interfaces:
- - 1680
- - 1679
- - 1678
+ - 1680
+ - 1679
+ - 1678
service: ISCSI_SERVICE
netmask: 19
network: 172.31.32.0
default_gateway: 172.31.63.254
ips:
- - 172.31.32.145
- - 172.31.32.146
- - 172.31.32.147
- - 172.31.32.148
- - 172.31.32.149
- - 172.31.32.150
+ - 172.31.32.145
+ - 172.31.32.146
+ - 172.31.32.147
+ - 172.31.32.148
+ - 172.31.32.149
+ - 172.31.32.150
user: admin
password: secret
system: ibox001
@@ -103,28 +127,24 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
infinibox_argument_spec,
get_system,
- unixMillisecondsToDate,
merge_two_dicts,
get_net_space,
)
try:
from infinisdk.core.exceptions import APICommandFailed
- from infinisdk.core.exceptions import ObjectNotFound
- from infi.dtypes.iqn import make_iscsi_name
except ImportError:
pass # Handled by HAS_INFINISDK from module_utils
@api_wrapper
def create_empty_network_space(module, system):
+ """ Create an empty network space """
# Create network space
network_space_name = module.params["name"]
service = module.params["service"]
@@ -137,9 +157,7 @@ def create_empty_network_space(module, system):
}
interfaces = module.params["interfaces"]
- # print("Creating network space {0}".format(network_space_name))
- product_id = system.api.get('system/product_id')
- # print("api: {0}".format(product_id.get_result()))
+ # product_id = system.api.get('system/product_id')
net_create_url = "network/spaces"
net_create_data = {
@@ -153,11 +171,13 @@ def create_empty_network_space(module, system):
if mtu:
net_create_data["mtu"] = mtu
- net_create = system.api.post(
- path=net_create_url,
- data=net_create_data
- )
- # print("net_create: {0}".format(net_create))
+ try:
+ system.api.post(
+ path=net_create_url,
+ data=net_create_data
+ )
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot create empty network space {network_space_name}: {err}")
@api_wrapper
@@ -166,36 +186,33 @@ def find_network_space_id(module, system):
Find the ID of this network space
"""
network_space_name = module.params["name"]
- net_id_url = "network/spaces?name={0}&fields=id".format(network_space_name)
+ net_id_url = f"network/spaces?name={network_space_name}&fields=id"
net_id = system.api.get(
path=net_id_url
)
result = net_id.get_json()['result'][0]
space_id = result['id']
- # print("Network space has ID {0}".format(space_id))
return space_id
@api_wrapper
def add_ips_to_network_space(module, system, space_id):
+ """ Add IPs to space. Ignore address conflict errors. """
network_space_name = module.params["name"]
- # print("Adding IPs to network space {0}".format(network_space_name))
-
ips = module.params["ips"]
for ip in ips:
- ip_url = "network/spaces/{0}/ips".format(space_id)
+ ip_url = f"network/spaces/{space_id}/ips"
ip_data = ip
- ip_add = system.api.post(
- path=ip_url,
- data=ip_data
- )
- # print("add_ips json: {0}".format(ip_add.get_json()))
- result = ip_add.get_json()['result']
- # print("add ip result: {0}".format(result))
+ try:
+ system.api.post(path=ip_url, data=ip_data)
+ except APICommandFailed as err:
+ if err.error_code != "NET_SPACE_ADDRESS_CONFLICT": # Ignore
+ module.fail_json(msg=f"Cannot add IP {ip} to network space {network_space_name}: {err}")
@api_wrapper
def create_network_space(module, system):
+ """ Create a network space """
if not module.check_mode:
# Create space
create_empty_network_space(module, system)
@@ -214,54 +231,43 @@ def create_network_space(module, system):
def update_network_space(module, system):
"""
Update network space.
- TODO - This is incomplete and will not update the space.
- It will instead return changed=False and a message.
- To implement this we will need to find the existing space.
- For each field that we support updating, we need to compare existing
- to new values and if different update. We will need to iterate
- over the settings or we will receive:
- Status: 400
- Code: NOT_SUPPORTED_MULTIPLE_UPDATE
+ Update fields individually. If grouped the API will generate
+ a NOT_SUPPORTED_MULTIPLE_UPDATE error.
"""
- changed = False
- msg = "Update is not supported yet"
- module.exit_json(changed=changed, msg=msg)
-
- # TODO Everything below is incomplete
- # Update network space
- network_space_name = module.params["name"]
- service = module.params["service"]
- network_config = {
- "netmask": module.params["netmask"],
- "network": module.params["network"],
- "default_gateway": module.params["default_gateway"],
- }
- interfaces = module.params["interfaces"]
-
- # print("Updating network space {0}".format(network_space_name))
-
- # Find space's ID
space_id = find_network_space_id(module, system)
-
- net_url = "network/spaces/{0}".format(space_id)
- net_data = {
- "name": network_space_name,
- "service": service,
- "network_config": network_config,
- "interfaces": interfaces,
- }
-
- # Find existing space
- net_existing = system.api.get(path=net_url)
-
- net_update = system.api.put(
- path=net_url,
- data=net_data
- )
- # print("net_update: {0}".format(net_update))
+ datas = [
+ {"interfaces": module.params["interfaces"]},
+ {"mtu": module.params["mtu"]},
+ {"network_config":
+ {
+ "default_gateway": module.params["default_gateway"],
+ "netmask": module.params["netmask"],
+ "network": module.params["network"],
+ }
+ },
+ {"rate_limit": module.params["rate_limit"]},
+ {"properties":
+ {
+ "is_async_only": module.params["async_only"],
+ }
+ },
+ ]
+ for data in datas:
+ try:
+ system.api.put(
+ path=f"network/spaces/{space_id}",
+ data=data
+ )
+ except APICommandFailed as err:
+ msg = f"Cannot update network space: {err}"
+ module.fail_json(msg=msg)
+ add_ips_to_network_space(module, system, space_id)
+ changed = True
+ return changed
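+
+# Notes on behaviour (derived from the code above): update_network_space() does not
+# compare existing values, so it reports changed=True on every run; IP additions go
+# through add_ips_to_network_space(), which ignores addresses that already exist
+# (NET_SPACE_ADDRESS_CONFLICT).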
-def get_network_space_fields(module, network_space):
+def get_network_space_fields(network_space):
+ """ Get the network space fields and return as a dict """
fields = network_space.get_fields(from_cache=True, raw_value=True)
field_dict = dict(
@@ -282,16 +288,18 @@ def get_network_space_fields(module, network_space):
def handle_stat(module):
+ """ Return details about the space """
network_space_name = module.params["name"]
system = get_system(module)
net_space = get_net_space(module, system)
if not net_space:
- module.fail_json(msg="Network space {0} not found".format(network_space_name))
+ module.fail_json(msg=f"Network space {network_space_name} not found")
- field_dict = get_network_space_fields(module, net_space)
+ field_dict = get_network_space_fields(net_space)
result = dict(
- changed=False, msg="Network space {0} stat found".format(network_space_name)
+ changed=False,
+ msg=f"Network space {network_space_name} stat found"
)
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
@@ -305,14 +313,41 @@ def handle_present(module):
system = get_system(module)
net_space = get_net_space(module, system)
if net_space:
- changed = update_network_space(module, net_space)
- msg = "Host {0} updated".format(network_space_name)
+ changed = update_network_space(module, system)
+ msg = f"Network space named {network_space_name} updated"
else:
changed = create_network_space(module, system)
- msg = "Network space {0} created".format(network_space_name)
+ msg = f"Network space named {network_space_name} created"
module.exit_json(changed=changed, msg=msg)
+def disable_and_delete_ip(module, network_space, ip):
+ """
+ Disable and delete a network space IP
+ """
+ if not ip:
+ return # Nothing to do
+ addr = ip['ip_address']
+ network_space_name = module.params["name"]
+ ip_type = ip['type']
+ mgmt = ""
+ if ip_type == "MANAGEMENT":
+ mgmt = "management " # Trailing space by design
+
+ try:
+ try:
+ network_space.disable_ip_address(addr)
+ except APICommandFailed as err:
+ if err.error_code == "IP_ADDRESS_ALREADY_DISABLED":
+ pass
+ else:
+ module.fail_json(msg=f"Disabling of network space {network_space_name} IP {mgmt}{addr} API command failed")
+
+ network_space.remove_ip_address(addr)
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f"Disabling or removal of network space {network_space_name} IP {mgmt}{addr} failed: {err}")
+
+
def handle_absent(module):
"""
Remove a namespace. First, may disable and remove the namespace's IPs.
@@ -322,55 +357,34 @@ def handle_absent(module):
network_space = get_net_space(module, system)
if not network_space:
changed = False
- msg = "Network space {0} already absent".format(network_space_name)
+ msg = f"Network space {network_space_name} already absent"
else:
# Find IPs from space
ips = list(network_space.get_ips())
# Disable and delete IPs from space
if not module.check_mode:
+ management_ip = None # Must be disabled and deleted last
for ip in ips:
- addr = ip["ip_address"]
-
- # print("Disabling IP {0}".format(addr))
- try:
- network_space.disable_ip_address(addr)
- except APICommandFailed as err:
- if err.error_code == "IP_ADDRESS_ALREADY_DISABLED":
- pass
- # print("Already disabled IP {0}".format(addr))
- else:
- # print("Failed to disable IP {0}".format(addr))
- module.fail_json(
- msg="Disabling of network space {0} IP {1} failed".format(
- network_space_name, addr
- )
- )
-
- # print("Removing IP {0}".format(addr))
- try:
- network_space.remove_ip_address(addr)
- except Exception as err:
- module.fail_json(
- msg="Removal of network space {0} IP {1} failed: {2}".format(
- network_space_name, addr, err
- )
- )
+ if ip['type'] == 'MANAGEMENT':
+ management_ip = ip
+ continue
+ disable_and_delete_ip(module, network_space, ip)
+ disable_and_delete_ip(module, network_space, management_ip)
# Delete space
network_space.delete()
changed = True
- msg = "Network space {0} removed".format(network_space_name)
+ msg = f"Network space {network_space_name} removed"
else:
changed = False
- msg = "Network space {0} not altered due to checkmode".format(
- network_space_name
- )
+ msg = f"Network space {network_space_name} not altered due to checkmode"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute a state """
state = module.params["state"]
try:
if state == "stat":
@@ -381,7 +395,7 @@ def execute_state(module):
handle_absent(module)
else:
module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
+ msg=f"Internal handler error. Invalid state: {state}"
)
finally:
system = get_system(module)
@@ -389,6 +403,7 @@ def execute_state(module):
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -397,18 +412,19 @@ def main():
default="present", required=False, choices=["stat", "present", "absent"]
),
service=dict(
- default="replication",
+ default="RMR_SERVICE",
required=False,
- choices=["replication", "NAS_SERVICE", "ISCSI_SERVICE"],
+ choices=["RMR_SERVICE", "NAS_SERVICE", "ISCSI_SERVICE"],
),
- mtu=dict(default=None, required=False, type=int),
+ mtu=dict(default=None, required=False, type="int"),
network=dict(default=None, required=False),
- netmask=dict(default=None, required=False, type=int),
+ netmask=dict(default=None, required=False, type="int"),
default_gateway=dict(default=None, required=False),
interfaces=dict(default=list(), required=False, type="list", elements="int"),
- network_config=dict(default=dict(), required=False, type=dict),
+ network_config=dict(default=dict(), required=False, type="dict"),
ips=dict(default=list(), required=False, type="list", elements="str"),
- rate_limit=dict(default=None, required=False, type=int),
+ rate_limit=dict(default=None, required=False, type="int"),
+ async_only=dict(default=False, required=False, type="bool"),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py
new file mode 100644
index 000000000..1916cdb49
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_notification_rule
+version_added: 2.13.0
+short_description: Configure notification rules
+description:
+  - This module configures notification rules on an Infinibox
+author: Wei Wang (@wwang)
+options:
+ name:
+ description:
+ - Name of the rule
+ type: str
+ required: true
+ event_level:
+ description:
+ - Event levels
+ type: list
+ elements: str
+ required: false
+ default: []
+ include_events:
+ description:
+ - Included events
+ type: list
+ elements: str
+ required: false
+ default: []
+ exclude_events:
+ description:
+      - Excluded events
+ type: list
+ elements: str
+ required: false
+ default: []
+ recipients:
+ description:
+      - List of email addresses of the recipients
+      - Recipients and target are mutually exclusive. Specify only one of them.
+ type: list
+ elements: str
+ required: false
+ default: []
+ target:
+ description:
+      - Name of the notification target
+      - Recipients and target are mutually exclusive. Specify only one of them.
+ type: str
+ required: false
+ state:
+ description:
+      - Queries or modifies the notification rule.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create a new notification rule to a target
+ infini_notification_rule:
+ name: "test-rule-to-target" # this need to be uniq
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ target: testgraylog1
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+HAS_ARROW = False
+
+try:
+ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ )
+except ModuleNotFoundError:
+ from infinibox import ( # Used when hacking
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ )
+
+
+@api_wrapper
+def find_target_id(module, system):
+ """ Find the ID of the target by name """
+ target = module.params["target"]
+ path = f"notifications/targets?name={target}&fields=id"
+ api_result = system.api.get(
+ path=path
+ )
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ target_id = result['id']
+ else:
+ target_id = None
+ return target_id
+
+
+@api_wrapper
+def get_rules(module):
+ """ Get all rules """
+ system = get_system(module)
+ path = "notifications/rules"
+ rules = system.api.get(path=path)
+ return rules
+
+
+@api_wrapper
+def find_rule_id(module, system):
+ """ Find the ID of the rule by name """
+ rule_name = module.params["name"]
+ path = f"notifications/rules?name={rule_name}&fields=id"
+ api_result = system.api.get(
+ path=path
+ )
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ rule_id = result['id']
+ else:
+ rule_id = None
+ return rule_id
+
+
+@api_wrapper
+def delete_rule(module):
+ """ Delete a notification rule """
+ system = get_system(module)
+ rule_id = find_rule_id(module, system)
+ path = f"notifications/rules/{rule_id}?approved=true"
+ system.api.delete(path=path)
+
+
+@api_wrapper
+def create_rule(module):
+ """ Create a new notifition rule """
+ system = get_system(module)
+ name = module.params["name"]
+ event_level = module.params["event_level"]
+ include_events = module.params["include_events"]
+ exclude_events = module.params["exclude_events"]
+ recipients = module.params["recipients"]
+ target = module.params["target"]
+ path = "notifications/rules"
+
+ json_data = {
+ "name": name,
+ "event_level": event_level,
+ "include_events": include_events,
+ "exclude_events": exclude_events,
+ }
+
+ if recipients:
+ target_parameters = {
+ "recipients": recipients
+ }
+ target_id = 3 # Target ID for sending to recipients
+ json_data["target_parameters"] = target_parameters
+ elif target:
+ target_id = find_target_id(module, system)
+ else:
+ msg = "Neither recipients nor target parameters specified"
+ module.fail_json(msg=msg)
+
+ json_data["target_id"] = target_id
+
+ system.api.post(path=path, data=json_data)
+
+
+@api_wrapper
+def update_rule(module):
+ """
+ Update an existing rule.
+ """
+ system = get_system(module)
+ name = module.params["name"]
+ event_level = module.params["event_level"]
+ include_events = module.params["include_events"]
+ exclude_events = module.params["exclude_events"]
+ recipients = module.params["recipients"]
+ target = module.params["target"]
+
+ json_data = {
+ "name": name,
+ "event_level": event_level,
+ "include_events": include_events,
+ "exclude_events": exclude_events,
+ }
+
+ if recipients:
+ target_parameters = {
+ "recipients": recipients
+ }
+ target_id = 3 # Target ID for sending to recipients
+ json_data["target_parameters"] = target_parameters
+ elif target:
+ target_id = find_target_id(module, system)
+ else:
+ msg = "Neither recipients nor target parameters specified"
+ module.fail_json(msg=msg)
+
+ json_data["target_id"] = target_id
+ rule_id = find_rule_id(module, system)
+ path = f"notifications/rules/{rule_id}"
+ system.api.put(path=path, data=json_data)
+
+
+def handle_present(module):
+ """ Create or update a rule """
+ system = get_system(module)
+ name = module.params["name"]
+    changed = False
+    msg = f"Rule named {name} unchanged due to check mode"
+    if not module.check_mode:
+ rule_id = find_rule_id(module, system)
+ if not rule_id:
+ create_rule(module)
+ changed = True
+ msg = f"Rule named {name} created"
+ else:
+ update_rule(module)
+ msg = f"Rule named {name} updated"
+ changed = True
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_stat(module):
+ """ Return rule stat """
+ result = None
+ system = get_system(module)
+ name = module.params['name']
+ rule_id = find_rule_id(module, system)
+ if rule_id:
+ path = f"notifications/rules/{rule_id}"
+ api_result = system.api.get(path=path)
+ result = api_result.get_json()['result']
+ result["rule_id"] = result.pop("id") # Rename id to rule_id
+ result["msg"] = f"Stat for notification rule named {name}"
+ result["changed"] = False
+ module.exit_json(**result)
+ msg = f"Notification rule {name} not found"
+ module.fail_json(msg=msg)
+
+
+def handle_absent(module):
+ """ Remove rule """
+ changed = False
+ name = module.params["name"]
+ system = get_system(module)
+
+ rule_id = find_rule_id(module, system)
+ if not rule_id:
+ msg = f"Rule named {name} not found. Deletion not required."
+ changed = False
+ else:
+ msg = f"Rule named {name} has been deleted"
+ changed = True
+ if not module.check_mode:
+ delete_rule(module)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ recipients = module.params['recipients']
+ target = module.params['target']
+ if recipients and target:
+ msg = "Cannot specify both recipients and target parameters"
+ module.fail_json(msg=msg)
+ if recipients:
+ for recipient in recipients:
+ if len(recipient) == 1:
+ msg = f"{recipient} is an invalid email address. Recipients '{recipients}' must be provided as a list, e.g. '[ \"user@example.com\" ]'"
+ module.fail_json(msg=msg)
+ if '@' not in recipient:
+ msg = f"{recipient} is an invalid email address"
+ module.fail_json(msg=msg)
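+
+# Illustrative usage sketch (not taken from the module's EXAMPLES): recipients must
+# be a YAML list of email addresses, for example
+#   recipients:
+#     - user1@example.com
+#     - user2@example.com
+# The single-character length check above appears intended to catch a bare string
+# being iterated character by character instead of a proper list.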
+
+
+def main():
+ """Main module function"""
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "name": {"required": True},
+ "event_level": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "include_events": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "exclude_events": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "recipients": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "target": {"required": False, "type": "str", "default": None},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py
new file mode 100644
index 000000000..a9e707289
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_notification_target
+version_added: 2.13.0
+short_description: Configure a notification target
+description:
+ - This module configures syslog notification targets on an Infinibox
+author: Wei Wang (@wwang)
+options:
+ name:
+ description:
+ - Name of the syslog target
+ type: str
+ required: true
+ host:
+ description:
+ - Host name or IP address of the target
+ type: str
+ required: false
+ port:
+ description:
+ - Port of the target
+ type: int
+ required: false
+ default: 514
+ transport:
+ description:
+ - TCP or UDP
+ type: str
+ required: false
+ choices:
+ - UDP
+ - TCP
+ default: UDP
+ protocol:
+ description:
+ - Protocol used for this target. Currently, the only valid value is SYSLOG.
+ type: str
+ required: false
+ choices:
+ - SYSLOG
+ default: SYSLOG
+ facility:
+ description:
+ - Facility
+ choices:
+ - LOCAL0
+ - LOCAL1
+ - LOCAL2
+ - LOCAL3
+ - LOCAL4
+ - LOCAL5
+ - LOCAL6
+ - LOCAL7
+ type: str
+ required: false
+ default: LOCAL7
+ visibility:
+ description:
+ - Visibility
+ type: str
+ choices:
+ - CUSTOMER
+ - INFINIDAT
+ required: false
+ default: CUSTOMER
+ post_test:
+ description:
+      - Run a test after the new target is created
+ type: bool
+ required: false
+ default: true
+ state:
+ description:
+      - Queries or modifies the notification target
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create notification targets
+ infini_notification_target:
+ state: present
+ name: testgraylog1
+ protocol: SYSLOG
+ host: 172.31.77.214
+ port: 8067
+ facility: LOCAL7
+ transport: TCP
+ visibility: CUSTOMER
+ post_test: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+- name: Create a new notification rule to a target
+ infini_notification_rule:
+ name: "test-rule-to-target" # this need to be uniq
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ target: testgraylog1
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ merge_two_dicts,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def get_target(module):
+ """
+ Find and return config setting value
+ Use disable_fail when we are looking for config
+ and it may or may not exist and neither case is an error.
+ """
+ name = module.params['name']
+ path = f"notifications/targets?name={name}"
+ system = get_system(module)
+
+ try:
+ target = system.api.get(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot find notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if not target:
+ msg = f"Users repository {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ result = target.get_result()
+ return result
+
+
+def handle_stat(module):
+ """Return config stat"""
+ name = module.params['name']
+ try:
+ result = get_target(module)[0]
+ except IndexError:
+ module.fail_json(f"Cannot stat notification target {name}. Target not found.")
+ result2 = {
+ "changed": False,
+ "msg": f"Found notification target {name}",
+ }
+ result = merge_two_dicts(result, result2)
+ module.exit_json(**result)
+
+
+@api_wrapper
+def find_target_id(module, system):
+ """ Find the ID of the target by name """
+ target_name = module.params["name"]
+
+ try:
+ path = f"notifications/targets?name={target_name}&fields=id"
+ api_result = system.api.get(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot find ID for notification target {target_name}: {err}"
+ module.fail_json(msg=msg)
+
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ target_id = result['id']
+ else:
+ target_id = None
+ return target_id
+
+
+@api_wrapper
+def delete_target(module):
+ """ Delete a notification target """
+ system = get_system(module)
+ name = module.params["name"]
+ target_id = find_target_id(module, system)
+
+ try:
+ path = f"notifications/targets/{target_id}?approved=true"
+ system.api.delete(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot delete notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+
+@api_wrapper
+def create_target(module):
+ """ Create a new notifition target """
+ system = get_system(module)
+ name = module.params["name"]
+ protocol = module.params["protocol"]
+ host = module.params["host"]
+ port = module.params["port"]
+ facility = module.params["facility"]
+ transport = module.params["transport"]
+ post_test = module.params["post_test"]
+ visibility = module.params["visibility"]
+
+ path = "notifications/targets"
+
+ json_data = {
+ "name": name,
+ "protocol": protocol,
+ "host": host,
+ "port": port,
+ "facility": facility,
+ "transport": transport,
+ "visibility": visibility
+ }
+
+ try:
+ system.api.post(path=path, data=json_data)
+ except APICommandFailed as err:
+ msg = f"Cannot create notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if post_test:
+ target_id = find_target_id(module, system)
+ path = f"notifications/targets/{target_id}/test"
+ json_data = {}
+ try:
+ system.api.post(path=path, data=json_data)
+ except APICommandFailed as err:
+ msg = f"Cannot test notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+
+@api_wrapper
+def update_target(module):
+ """ Update an existing target. """
+ delete_target(module)
+ create_target(module)
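+
+# Note on behaviour (derived from the code above): an update is implemented as
+# delete-and-recreate, so handle_present() below reports changed=True whenever the
+# target already exists, even if none of its settings differ.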
+
+
+def handle_present(module):
+ """Make config present"""
+ system = get_system(module)
+ name = module.params["name"]
+    changed = False
+    msg = f"Target {name} unchanged due to check mode"
+    if not module.check_mode:
+ target_id = find_target_id(module, system)
+ if not target_id:
+ create_target(module)
+ msg = f"Target {name} created"
+ else:
+ update_target(module)
+ msg = f"Target {name} deleted and recreated"
+ changed = True
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ """Make config present"""
+ changed = False
+ name = module.params["name"]
+ system = get_system(module)
+ target_id = find_target_id(module, system)
+
+ if not target_id:
+ msg = f"Target {name} already does not exist"
+ changed = False
+ else:
+ msg = f"Target {name} has been deleted"
+ if not module.check_mode:
+ changed = True
+ delete_target(module)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """ Determine which state function to execute and do so """
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """ Verify module options are sane """
+ if module.params['protocol'] != "SYSLOG":
+ module.fail_json(msg="The only supported protocol is SYSLOG")
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "name": {"required": True},
+ "host": {"required": False},
+ "port": {"required": False, "type": "int", "default": 514},
+ "transport": {"required": False, "default": "UDP", "choices": ["UDP", "TCP"]},
+ "protocol": {"required": False, "default": "SYSLOG", "choices": ["SYSLOG"]},
+ "facility": {"required": False, "default": "LOCAL7", "choices": ["LOCAL0", "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6", "LOCAL7"]},
+ "visibility": {"required": False, "default": "CUSTOMER", "choices": ["CUSTOMER", "INFINIDAT"]},
+ "post_test": {"required": False, "default": True, "type": "bool"},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
index d02657a19..43daa71be 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies pools on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -77,7 +81,7 @@ EXAMPLES = r'''
- name: Disable SSD Cache on pool
infini_pool:
name: foo
- ssd_cache: no
+ ssd_cache: false
user: admin
password: secret
system: ibox001
@@ -85,7 +89,7 @@ EXAMPLES = r'''
- name: Disable Compression on pool
infini_pool:
name: foo
- compression: no
+ compression: false
user: admin
password: secret
system: ibox001
@@ -95,7 +99,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -114,7 +117,7 @@ except ImportError:
@api_wrapper
def create_pool(module, system):
- """Create Pool"""
+ """ Create Pool """
name = module.params['name']
size = module.params['size']
vsize = module.params['vsize']
@@ -141,8 +144,8 @@ def create_pool(module, system):
@api_wrapper
-def update_pool(module, system, pool):
- """Update Pool"""
+def update_pool(module, pool):
+ """ Update Pool """
changed = False
size = module.params['size']
@@ -184,23 +187,21 @@ def update_pool(module, system, pool):
@api_wrapper
def delete_pool(module, pool):
- """Delete Pool"""
+ """ Delete Pool """
if not module.check_mode:
pool.delete()
msg = 'Pool deleted'
module.exit_json(changed=True, msg=msg)
-def get_sys_pool(module):
+def handle_stat(module):
+ """ Show details about a pool """
system = get_system(module)
pool = get_pool(module, system)
- return (system, pool)
-
-def handle_stat(module):
- system, pool = get_sys_pool(module)
+ name = module.params['name']
if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['name']))
+ module.fail_json(msg=f'Pool {name} not found')
fields = pool.get_fields()
# print('fields: {0}'.format(fields))
free_physical_capacity = fields.get('free_physical_capacity', None)
@@ -216,17 +217,21 @@ def handle_stat(module):
def handle_present(module):
- system, pool = get_sys_pool(module)
+ """ Create pool """
+ system = get_system(module)
+ pool = get_pool(module, system)
if not pool:
create_pool(module, system)
module.exit_json(changed=True, msg="Pool created")
else:
- changed = update_pool(module, system, pool)
+ changed = update_pool(module, pool)
module.exit_json(changed=changed, msg="Pool updated")
def handle_absent(module):
- system, pool = get_sys_pool(module)
+ """ Remove pool """
+ system = get_system(module)
+ pool = get_pool(module, system)
if not pool:
module.exit_json(changed=False, msg="Pool already absent")
else:
@@ -235,6 +240,7 @@ def handle_absent(module):
def execute_state(module):
+ """Determine which state function to execute and do so"""
state = module.params['state']
try:
if state == 'stat':
@@ -244,13 +250,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -274,13 +281,13 @@ def main():
if module.params['size']:
try:
Capacity(module.params['size'])
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
if module.params['vsize']:
try:
Capacity(module.params['vsize'])
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
execute_state(module)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
index 303127260..6502045a9 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+"""This module manages ports on an Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_port
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Add and Delete fiber channel and iSCSI ports to a host on Infinibox
description:
- This module adds or deletes fiber channel or iSCSI ports to hosts on
@@ -21,6 +25,7 @@ options:
host:
description:
- Host Name
+ type: str
required: true
state:
description:
@@ -57,7 +62,7 @@ EXAMPLES = r'''
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
iqns:
- - "iqn.yyyy-mm.reverse-domain:unique-string"
+ - "iqn.yyyy-mm.reverse-domain:unique-string"
system: ibox01
user: admin
password: secret
@@ -67,8 +72,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -135,6 +138,7 @@ def delete_ports(module, system):
def get_sys_host(module):
+ """ Get parameters """
system = get_system(module)
host = get_host(module, system)
return (system, host)
@@ -168,12 +172,12 @@ def find_host_initiators_data(module, system, host, initiator_type):
Only include desired initiator keys for each initiator.
Return the filtered and edited host initiator list.
"""
- request = "initiators?page=1&page_size=1000&host_id={0}".format(host.id)
+ request = f"initiators?page=1&page_size=1000&host_id={host.id}"
# print("\nrequest:", request, "initiator_type:", initiator_type)
get_initiators_result = system.api.get(request, check_version=False)
result_code = get_initiators_result.status_code
if result_code != 200:
- msg = "get initiators REST call failed. code: {0}".format(result_code)
+ msg = f"get initiators REST call failed. code: {result_code}"
module.fail_json(msg=msg)
# Only return initiators of the desired type.
@@ -208,7 +212,7 @@ def find_host_initiators_data(module, system, host, initiator_type):
return host_initiators_by_type
-def get_port_fields(module, system, host):
+def get_port_fields(module, system, host): # pylint: disable=too-many-locals
"""
Return a dict with desired fields from FC and ISCSI ports associated with the host.
"""
@@ -297,13 +301,12 @@ def handle_stat(module):
Return json with status.
"""
system, host = get_sys_host(module)
-
host_name = module.params["host"]
if not host:
- module.fail_json(msg="Host {0} not found".format(host_name))
+ module.fail_json(msg=f"Host {host_name} not found")
field_dict = get_port_fields(module, system, host)
- result = dict(changed=False, msg="Host {0} ports found".format(host_name),)
+ result = dict(changed=False, msg=f"Host {host_name} ports found")
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
@@ -313,16 +316,15 @@ def handle_present(module):
Handle present state. Fail if host is None.
"""
system, host = get_sys_host(module)
-
host_name = module.params["host"]
if not host:
- module.fail_json(msg="Host {0} not found".format(host_name))
+ module.fail_json(msg=f"Host {host_name} not found")
changed = update_ports(module, system)
if changed:
- msg = "Mapping created for host {0}".format(host.get_name())
+ msg = f"Mapping created for host {host_name}"
else:
- msg = "No mapping changes were required for host {0}".format(host.get_name())
+ msg = f"No mapping changes were required for host {host_name}"
result = dict(changed=changed, msg=msg,)
module.exit_json(**result)
@@ -333,18 +335,17 @@ def handle_absent(module):
Handle absent state. Fail if host is None.
"""
system, host = get_sys_host(module)
+ host_name = module.params["host"]
if not host:
module.exit_json(
- changed=False, msg="Host {0} not found".format(module.params["host"])
+ changed=False, msg=f"Host {host_name} not found"
)
changed = delete_ports(module, system)
if changed:
- msg = "Mapping removed from host {0}".format(host.get_name())
+ msg = f"Mapping removed from host {host_name}"
else:
- msg = "No mapping changes were required. Mapping already removed from host {0}".format(
- host.get_name()
- )
+ msg = f"No mapping changes were required. Mapping already removed from host {host_name}"
result = dict(changed=changed, msg=msg,)
module.exit_json(**result)
@@ -364,26 +365,21 @@ def execute_state(module):
handle_absent(module)
else:
module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
+ msg=f"Internal handler error. Invalid state: {state}"
)
finally:
system = get_system(module)
system.logout()
-def check_options(module):
- pass
-
-
def main():
"""
Gather auguments and manage mapping of vols to hosts.
"""
argument_spec = infinibox_argument_spec()
- null_list = list()
argument_spec.update(
dict(
- host=dict(required=True, type=str),
+ host=dict(required=True, type="str"),
state=dict(default="present", choices=["stat", "present", "absent"]),
wwns=dict(type="list", elements="str", default=list()),
iqns=dict(type="list", elements="str", default=list()),
@@ -395,7 +391,6 @@ def main():
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib("infinisdk"))
- check_options(module)
execute_state(module)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py
new file mode 100644
index 000000000..66219e08b
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Manage SSO """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_sso
+version_added: 2.16.0
+short_description: Configures or queries SSO on Infinibox
+description:
+  - This module configures (present state), removes (absent state), or shows information about (stat state) SSO on Infinibox
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Sets a name to reference the SSO by.
+ required: true
+ type: str
+ issuer:
+ description:
+ - URI of the SSO issuer.
+ required: false
+ type: str
+ sign_on_url:
+ description:
+ - URL for sign on.
+ type: str
+ required: false
+ signed_assertion:
+ description:
+ - Signed assertion
+ type: bool
+ required: false
+ default: false
+ signed_response:
+ description:
+ - Signed response
+ required: false
+ type: bool
+ default: false
+ signing_certificate:
+ description:
+ - Signing certificate content.
+ type: str
+ required: false
+ enabled:
+ description:
+ - Determines if the SSO is enabled.
+ required: false
+ default: true
+ type: bool
+ state:
+ description:
+ - Creates/Modifies the SSO, when using state present.
+ - For state absent, the SSO is removed.
+ - State stat shows the existing SSO's details.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Configure SSO
+ infini_sso:
+ name: OKTA
+ enabled: true
+ issuer: "http://www.okta.com/eykRra384o32rrTs"
+ sign_on_url: "https://infinidat.okta.com/app/infinidat_psus/exkra32oyyU6KCUCk2p7/sso/saml"
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Stat SSO
+ infini_sso:
+ name: OKTA
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Clear SSO configuration
+ infini_sso:
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ api_wrapper,
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def find_sso(module, name):
+ """ Find a SSO using its name """
+ path = f"config/sso/idps?name={name}"
+
+ try:
+ system = get_system(module)
+ sso_result = system.api.get(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot find SSO identity provider {name}: {err}"
+ module.fail_json(msg=msg)
+
+ return sso_result
+
+
+def handle_stat(module):
+ """ Handle the stat state """
+ name = module.params["name"]
+ sso_result = find_sso(module, name)
+ if not sso_result:
+ msg = f"SSO identity provider {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=False,
+ msg=f"SSO identity provider {name} stat found"
+ )
+
+ result = merge_two_dicts(result, sso_result[0])
+ result['signing_certificate'] = "redacted"
+ module.exit_json(**result)
+
+
+def handle_present(module): # pylint: disable=too-many-locals
+ """ Handle the present state """
+ enabled = module.params['enabled']
+ issuer = module.params['issuer']
+ sign_on_url = module.params['sign_on_url']
+ signed_assertion = module.params['signed_assertion']
+ signed_response = module.params['signed_response']
+ signing_certificate = module.params['signing_certificate']
+ name = module.params['name']
+
+ existing_sso = find_sso(module, name)
+ if existing_sso:
+ existing_sso_id = existing_sso[0]['id']
+ delete_sso(module, existing_sso_id)
+
+ path = "config/sso/idps"
+ data = {
+ "enabled": enabled,
+ "issuer": issuer,
+ "name": name,
+ "sign_on_url": sign_on_url,
+ "signed_assertion": signed_assertion,
+ "signed_response": signed_response,
+ "signing_certificate": signing_certificate,
+ }
+
+ try:
+ system = get_system(module)
+ sso_result = system.api.post(path=path, data=data).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot configure SSO identity provider named {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if not existing_sso:
+ msg = f"SSO identity provider named {name} successfully configured"
+ else:
+ msg = f"SSO identity provider named {name} successfully removed and recreated with updated parameters"
+ result = dict(
+ changed=True,
+ msg=msg,
+ )
+ result = merge_two_dicts(result, sso_result)
+ result['signing_certificate'] = "redacted"
+
+ module.exit_json(**result)
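+
+# Note on behaviour (derived from the code above): handle_present() recreates an
+# existing identity provider rather than patching it in place and does not consult
+# check_mode, so changed=True is reported on every successful run.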
+
+
+def delete_sso(module, sso_id):
+ """ Delete a SSO. Reference its ID. """
+ path = f"config/sso/idps/{sso_id}"
+ name = module.params["name"]
+ try:
+ system = get_system(module)
+ sso_result = system.api.delete(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot delete SSO identity provider {name}: {err}"
+ module.fail_json(msg=msg)
+ return sso_result
+
+
+def handle_absent(module):
+ """ Handle the absent state """
+ name = module.params["name"]
+ found_sso = find_sso(module, name)
+ if not found_sso:
+ result = dict(
+ changed=False,
+ msg=f"SSO {name} already not found"
+ )
+ module.exit_json(**result)
+
+ sso_id = found_sso[0]['id']
+ sso_result = delete_sso(module, sso_id)
+
+ if not sso_result:
+ msg = f"SSO identity provider named {name} with ID {sso_id} not found. Cannot delete."
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=True,
+ msg=f"SSO identity provider named {name} deleted"
+ )
+
+ result = merge_two_dicts(result, sso_result)
+ result['signing_certificate'] = "redacted"
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ signing_certificate = module.params["signing_certificate"]
+ sign_on_url = module.params["sign_on_url"]
+ state = module.params["state"]
+ is_failed = False
+ msg = ""
+ if state in ["present"]:
+ if not sign_on_url:
+ msg += "A sign_on_url parameter must be provided. "
+ is_failed = True
+ if not signing_certificate:
+ msg += "A signing_certificate parameter must be provided. "
+ is_failed = True
+ if is_failed:
+ module.fail_json(msg=msg)
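+
+# An illustrative 'present' task that satisfies the checks in check_options()
+# above. This is only a sketch; the identity provider name, URLs, certificate
+# variable and connection details below are placeholders.
+#
+#   - name: Configure an SSO identity provider
+#     infini_sso:
+#       name: EXAMPLE_IDP
+#       issuer: https://idp.example.com/metadata
+#       sign_on_url: https://idp.example.com/sso/saml
+#       signing_certificate: "{{ example_signing_certificate }}"
+#       enabled: true
+#       state: present
+#       user: admin
+#       password: secret
+#       system: ibox001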
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ enabled=dict(required=False, type="bool", default=True),
+ issuer=dict(required=False, default=None),
+ name=dict(required=True),
+ sign_on_url=dict(required=False, default=None),
+ signed_assertion=dict(required=False, type="bool", default=False),
+ signed_response=dict(required=False, type="bool", default=False),
+ signing_certificate=dict(required=False, default=None, no_log=True),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
index 01bcd0a5f..1915e5d87 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
@@ -1,16 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+""" Manage Infinibox users """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_user
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Create, Delete and Modify a User on Infinibox
description:
- This module creates, deletes or modifies a user on Infinibox.
@@ -21,7 +26,7 @@ options:
- The new user's Name. Once a user is created, the user_name may not be
changed from this module. It may be changed from the UI or from
infinishell.
- required: true
+ required: false
type: str
user_email:
description:
@@ -52,12 +57,42 @@ options:
type: str
state:
description:
- - Creates/Modifies user when present or removes when absent
+ - Creates/Modifies user when present or removes when absent.
+ - Use state 'login' to test user credentials.
+ - Use state 'reset' to reset a user password.
required: false
default: present
- choices: [ "stat", "reset_password", "present", "absent" ]
+ choices: [ "stat", "reset_password", "present", "absent", "login" ]
type: str
+ user_ldap_group_name:
+ description:
+ - Name of the LDAP user group
+ required: false
+ type: str
+ user_ldap_group_dn:
+ description:
+ - DN of the LDAP user group
+ required: false
+ type: str
+ user_ldap_group_ldap:
+ description:
+      - Name of the LDAP configuration (users repository) to which the group belongs
+ required: false
+ type: str
+ user_ldap_group_role:
+ description:
+ - Role for the LDAP user group
+ choices: [ "admin", "pool_admin", "read_only" ]
+ required: false
+ type: str
+ user_ldap_group_pools:
+ description:
+ - A list of existing pools managed by the LDAP user group
+ default: []
+ required: false
+ type: list
+ elements: str
extends_documentation_fragment:
- infinibox
'''
@@ -81,27 +116,57 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
- HAS_INFINISDK,
api_wrapper,
infinibox_argument_spec,
get_system,
get_user,
- get_pool,
- unixMillisecondsToDate,
merge_two_dicts,
)
+
+HAS_INFINISDK = True
try:
- from infi.dtypes.iqn import make_iscsi_name
+ from infinisdk.core.exceptions import APICommandFailed
except ImportError:
- pass # Handled by HAS_INFINISDK from module_utils
+ HAS_INFINISDK = False
+
+
+@api_wrapper
+def find_user_ldap_group_id(module):
+ """
+ Find the ID of the LDAP user group by name
+ """
+ ldap_id = None
+ ldap_name = module.params["user_ldap_group_name"]
+ path = f"users?name={ldap_name}&type=eq%3ALdap"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ ldap_id = result['id']
+ return ldap_id
+
+
+@api_wrapper
+def find_ldap_id(module):
+ """
+ Find the ID of the LDAP by name
+ """
+ ldap_id = None
+ ldap_name = module.params["user_ldap_group_ldap"]
+ path = f"config/ldap?name={ldap_name}&fields=id"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ ldap_id = result['id']
+ return ldap_id
@api_wrapper
def create_user(module, system):
+ """ Create user """
if not module.check_mode:
user = system.users.create(name=module.params['user_name'],
password=module.params['user_password'],
@@ -120,6 +185,42 @@ def create_user(module, system):
return changed
+@api_wrapper
+def create_ldap_user_group(module):
+ """ Create ldap user group """
+ ldap_group_name = module.params['user_ldap_group_name']
+ ldap_name = module.params['user_ldap_group_ldap']
+ ldap_id = find_ldap_id(module)
+ ldap_pools = module.params['user_ldap_group_pools']
+ if not ldap_id:
+ msg = f'Cannot create LDAP group {ldap_group_name}. Cannot find ID for LDAP name {ldap_name}'
+ module.fail_json(msg=msg)
+ path = "users"
+ system = get_system(module)
+ data = {
+ "name": ldap_group_name,
+ "dn": module.params['user_ldap_group_dn'],
+ "ldap_id": ldap_id,
+ "role": module.params['user_ldap_group_role'],
+ "type": "Ldap",
+ }
+ try:
+ system.api.post(path=path, data=data)
+    except APICommandFailed as err:
+        # Fail on any API error; a 409 indicates a name conflict with an existing user or group
+        msg = f'Cannot create user_ldap_group_name {ldap_group_name}: {err}'
+        module.fail_json(msg=msg)
+ changed = True
+
+ user = get_user(module, system, ldap_group_name)
+ for pool_name in ldap_pools:
+ # Pylint is not finding Infinibox.pools but Python does.
+ pool = system.pools.get(name=pool_name) # pylint: disable=no-member
+ add_user_to_pool_owners(user, pool)
+
+ return changed
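+
+# Illustrative task for the LDAP user group support implemented above. A sketch
+# only; the group name, DN, LDAP repository name, role, pool and connection
+# details below are placeholders.
+#
+#   - name: Create an LDAP user group with a pool_admin role
+#     infini_user:
+#       user_ldap_group_name: storage_admins
+#       user_ldap_group_dn: CN=storage_admins,OU=Groups,DC=example,DC=com
+#       user_ldap_group_ldap: example_ldap
+#       user_ldap_group_role: pool_admin
+#       user_ldap_group_pools:
+#         - example_pool
+#       state: present
+#       user: admin
+#       password: secret
+#       system: ibox001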
+
+
def add_user_to_pool_owners(user, pool):
"""
Find the current list of pool owners and add user using pool.set_owners().
@@ -127,27 +228,20 @@ def add_user_to_pool_owners(user, pool):
get owners, add user, then set owners. Further, we need to know if the
owners changed. Use sets of owners to compare.
"""
- # print("add_user_to_pool_owners(): start")
changed = False
pool_fields = pool.get_fields(from_cache=True, raw_value=True)
pool_owners = pool_fields.get('owners', [])
- # print('pool_owners:', pool_owners, 'pool_owners type:', type(pool_owners))
- # print('user:', user)
- # print('pool:', pool)
pool_owners_set = set(pool_owners)
- # print('pool_owners_set:', pool_owners_set)
new_pool_owners_set = pool_owners_set.copy()
new_pool_owners_set.add(user.id)
- # print('new_pool_owners_set:', new_pool_owners_set)
if pool_owners_set != new_pool_owners_set:
pool.set_owners([user])
changed = True
- # print("changed:", changed)
- # print("add_user_to_pool_owners(): end")
return changed
def remove_user_from_pool_owners(user, pool):
+ """ Remove user from pool owners """
changed = False
pool_fields = pool.get_fields(from_cache=True, raw_value=True)
pool_owners = pool_fields.get('owners', [])
@@ -162,9 +256,9 @@ def remove_user_from_pool_owners(user, pool):
@api_wrapper
def update_user(module, system, user):
- # print("update_user()")
+ """ Update user """
if user is None:
- raise AssertionError("Cannot update user {0}. User not found.".format(module.params["user_name"]))
+ raise AssertionError(f"Cannot update user {module.params['user_name']}. User not found.")
changed = False
fields = user.get_fields(from_cache=True, raw_value=True)
@@ -182,23 +276,34 @@ def update_user(module, system, user):
try:
pool_name = module.params['user_pool']
pool = system.pools.get(name=pool_name)
- except Exception as err:
- module.fail_json(msg='Cannot find pool {0}: {1}'.format(pool_name, err))
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f'Cannot find pool {pool_name}: {err}')
if add_user_to_pool_owners(user, pool):
changed = True
return changed
+def update_ldap_user_group(module):
+ """ Update ldap user group by deleting and creating the LDAP user"""
+ changed = delete_ldap_user_group(module)
+ if not changed:
+ module.fail_json(msg='Cannot delete LDAP user {ldap_group_name}. Cannot find ID for LDAP group.')
+ create_ldap_user_group(module)
+ changed = True
+ return changed
+
+
@api_wrapper
-def reset_user_password(module, system, user):
- # print("update_user()")
+def reset_user_password(module, user):
+ """ Reset user's password """
if user is None:
- raise AssertionError("Cannot change user {0} password. User not found.".format(module.params["user_name"]))
+ module.fail_json(msg=f'Cannot change user {module.params["user_name"]} password. User not found.')
user.update_password(module.params['user_password'])
@api_wrapper
def delete_user(module, user):
+ """ Delete a user """
if not user:
return False
@@ -209,82 +314,196 @@ def delete_user(module, user):
return changed
-def get_sys_user(module):
+@api_wrapper
+def delete_ldap_user_group(module):
+ """ Delete a ldap user group """
+ changed = False
+ ldap_group_name = module.params['user_ldap_group_name']
+ ldap_group_id = find_user_ldap_group_id(module)
+ if not ldap_group_id:
+ changed = False
+ return changed
+ path = f"users/{ldap_group_id}?approved=yes"
system = get_system(module)
- user = get_user(module, system)
- # print("get_sys_user(): user:", user)
- return (system, user)
+ try:
+ system.api.delete(path=path)
+ changed = True
+ except APICommandFailed as err:
+ if err.status_code in [404]:
+ changed = False
+ else:
+ msg = f'An error occurred deleting user_ldap_group_name {ldap_group_name}: {err}'
+ module.fail_json(msg)
+ return changed
+
+
+def get_user_ldap_group(module):
+ """
+ Find the LDAP user group by name
+ """
+ result = None
+ user_ldap_group_name = module.params["user_ldap_group_name"]
+ path = f"users?name={user_ldap_group_name}&type=eq%3ALdap"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ return result
def get_user_fields(user):
+ """ Get user's fields """
pools = user.get_owned_pools()
pool_names = [pool.get_field('name') for pool in pools]
fields = user.get_fields(from_cache=True, raw_value=True)
- field_dict = dict(
- id=user.id,
- enabled=fields.get('enabled', None),
- role=fields.get('role', None),
- email=fields.get('email', None),
- pools=pool_names,
- )
+ field_dict = {
+ "dn": fields.get('dn', None),
+ "email": fields.get('email', None),
+ "enabled": fields.get('enabled', None),
+ "id": user.id,
+ "ldap_id": fields.get('ldap_id', None),
+ "pools": pool_names,
+ "role": fields.get('role', None),
+ "roles": fields.get('roles', []),
+ "type": fields.get('type', None),
+ }
return field_dict
def handle_stat(module):
- system, user = get_sys_user(module)
- user_name = module.params["user_name"]
- if not user:
- module.fail_json(msg='User {0} not found'.format(user_name))
- field_dict = get_user_fields(user)
- result = dict(
- changed=False,
- msg='User stat found'
- )
+ """ Handle stat for user or LDAP group user """
+ user_name = module.params['user_name']
+ user_ldap_group_name = module.params['user_ldap_group_name']
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ user_name = module.params["user_name"]
+ if not user:
+ module.fail_json(msg=f'User {user_name} not found')
+ field_dict = get_user_fields(user)
+ msg = f'User {user_name} stat found'
+ elif user_ldap_group_name:
+ user = get_user_ldap_group(module)
+ if not user:
+ module.fail_json(msg=f'user_ldap_group_name {user_ldap_group_name} not found')
+ field_dict = get_user_fields(user)
+ msg = f'User LDAP group {user_ldap_group_name} stat found'
+ else:
+        msg = 'Neither user_name nor user_ldap_group_name was provided for state stat'
+ module.fail_json(msg)
+
+ result = {
+ "changed": False,
+ "msg": msg,
+ }
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
def handle_present(module):
- system, user = get_sys_user(module)
+ """ Handle making user present """
user_name = module.params["user_name"]
- if not user:
- changed = create_user(module, system)
- msg = 'User {0} created'.format(user_name)
- else:
- changed = update_user(module, system, user)
- if changed:
- msg = 'User {0} updated'.format(user_name)
+ user_ldap_group_name = module.params["user_ldap_group_name"]
+ changed = False
+ msg = 'Message not set'
+
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ if not user:
+ changed = create_user(module, system)
+ msg = f'User {user_name} created'
+ else:
+ changed = update_user(module, system, user)
+ if changed:
+ msg = f'User {user_name} updated'
+ else:
+ msg = f'User {user_name} update required no changes'
+ elif user_ldap_group_name:
+ ldap_user = get_user_ldap_group(module)
+ if not ldap_user:
+ changed = create_ldap_user_group(module)
+ msg = f'LDAP user group {user_ldap_group_name} created'
else:
- msg = 'User {0} update required no changes'.format(user_name)
+ changed = update_ldap_user_group(module)
+ if changed:
+ msg = f'LDAP user group {user_ldap_group_name} updated by deleting and recreating with updated parameters'
+ else:
+ msg = f'LDAP user group {user_ldap_group_name} update not required - no changes'
+ else:
+        msg = 'Neither user_name nor user_ldap_group_name was provided for state present'
+ module.fail_json(msg)
+
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, user = get_sys_user(module)
- user_name = module.params["user_name"]
- if not user:
- changed = False
- msg = "User {0} already absent".format(user_name)
+ """ Handle making user absent """
+ user_name = module.params['user_name']
+ user_ldap_group_name = module.params['user_ldap_group_name']
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = False
+ msg = f"User {user_name} already absent"
+ else:
+ changed = delete_user(module, user)
+ msg = f"User {user_name} removed"
+ module.exit_json(changed=changed, msg=msg)
+ elif user_ldap_group_name:
+ changed = delete_ldap_user_group(module)
+ if changed:
+ msg = f"LDAP group user {user_ldap_group_name} removed"
+ else:
+ msg = f"LDAP group user {user_ldap_group_name} already absent"
+ module.exit_json(changed=changed, msg=msg)
else:
- changed = delete_user(module, user)
- msg = "User {0} removed".format(user_name)
- module.exit_json(changed=changed, msg=msg)
+        msg = 'Neither user_name nor user_ldap_group_name was provided for state absent'
+ module.fail_json(msg)
def handle_reset_password(module):
- system, user = get_sys_user(module)
+ """ Reset user password """
+ system = get_system(module)
+ user = get_user(module, system)
user_name = module.params["user_name"]
if not user:
- msg = 'Cannot change password. User {0} not found'.format(user_name)
+ msg = f'Cannot change password. User {user_name} not found'
module.fail_json(msg=msg)
else:
- reset_user_password(module, system, user)
- msg = 'User {0} password changed'.format(user_name)
+ reset_user_password(module, user)
+ msg = f'User {user_name} password changed'
module.exit_json(changed=True, msg=msg)
+def handle_login(module):
+ """ Test user credentials by logging in """
+ system = get_system(module)
+ user_name = module.params["user_name"]
+ user_password = module.params['user_password']
+ path = "users/login"
+ data = {
+ "username": user_name,
+ "password": user_password,
+ }
+ try:
+ login = system.api.post(path=path, data=data)
+ except APICommandFailed:
+ msg = f'User {user_name} failed to login'
+ module.fail_json(msg=msg)
+ if login.status_code == 200:
+ msg = f'User {user_name} successfully logged in'
+ module.exit_json(changed=False, msg=msg)
+ else:
+ msg = f'User {user_name} failed to login with status code: {login.status_code}'
+ module.fail_json(msg=msg)
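+
+# Illustrative use of the 'login' state handled above. A sketch only; the
+# credentials and connection details below are placeholders.
+#
+#   - name: Verify that a user can log in
+#     infini_user:
+#       user_name: example_user
+#       user_password: example_password
+#       state: login
+#       user: admin
+#       password: secret
+#       system: ibox001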
+
+
def execute_state(module):
+ """ Find state and handle it """
state = module.params['state']
try:
if state == 'stat':
@@ -295,47 +514,85 @@ def execute_state(module):
handle_absent(module)
elif state == 'reset_password':
handle_reset_password(module)
+ elif state == 'login':
+ handle_login(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
-def check_options(module):
+def check_options(module): # pylint: disable=too-many-branches
+ """ Check option logic """
state = module.params['state']
+ user_name = module.params['user_name']
user_role = module.params['user_role']
user_pool = module.params['user_pool']
+ user_ldap_group_name = module.params['user_ldap_group_name']
+ user_ldap_group_role = module.params['user_ldap_group_role']
if state == 'present':
if user_role == 'pool_admin' and not user_pool:
module.fail_json(msg='user_role "pool_admin" requires a user_pool to be provided')
if user_role != 'pool_admin' and user_pool:
module.fail_json(msg='Only user_role "pool_admin" should have a user_pool provided')
- valid_keys = ['user_email', 'user_password', 'user_role', 'user_enabled']
- for valid_key in valid_keys:
- # Check required keys provided
- try:
- not_used = module.params[valid_key]
- except KeyError:
- msg = 'For state "present", options {0} are required'.format(", ".join(valid_keys))
- module.fail_json(msg=msg)
- elif state == 'reset_password':
- if not module.params['user_password']:
- msg = 'For state "reset_password", user_password is required'
+ if not user_name and not user_ldap_group_name:
+ msg = 'For state "present", option user_name or user_ldap_group_name is required'
+ module.fail_json(msg=msg)
+
+ if user_name and user_ldap_group_name:
+ msg = 'For state "present", option user_name and user_ldap_group_name cannot both be provided'
+ module.fail_json(msg=msg)
+
+ if user_name:
+ required_user_params = [
+ 'user_email', 'user_password', 'user_role',
+ ]
+ for required_param in required_user_params:
+ param = module.params[required_param]
+ if param is None:
+ msg = f"For state 'present', option {required_param} is required with option user_name"
+ module.fail_json(msg=msg)
+
+ if user_ldap_group_name:
+ required_user_ldap_params = [
+ 'user_ldap_group_dn', 'user_ldap_group_ldap', 'user_ldap_group_role',
+ ]
+ for required_param in required_user_ldap_params:
+ param = module.params[required_param]
+ if not param:
+ msg = f'For state "present", option {required_param} is required with option user_ldap_group_name'
+ module.fail_json(msg=msg)
+ if user_ldap_group_role == 'pool_admin':
+ user_ldap_group_pools = module.params['user_ldap_group_pools']
+ if not user_ldap_group_pools:
+ msg = "For state 'present' and user_ldap_group_role 'pool_admin', user_ldap_group_pool must specify one or more pools"
+ module.fail_json(msg=msg)
+
+ elif state in ['reset_password', 'login']:
+ if not module.params['user_name'] or not module.params['user_password']:
+ msg = f"For state '{state}', user_name and user_password are both required"
+ module.fail_json(msg=msg)
def main():
+ """ main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- user_name=dict(required=True),
- user_email=dict(required=False),
- user_password=dict(required=False, no_log=True),
- user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only']),
+ user_name=dict(required=False),
+ user_email=dict(required=False, default=None),
+ user_password=dict(required=False, no_log=True, default=None),
+ user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only'], default=None),
user_enabled=dict(required=False, type='bool', default=True),
- user_pool=dict(required=False),
- state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent']),
+ user_pool=dict(required=False, default=None),
+ user_ldap_group_name=dict(required=False, default=None),
+ user_ldap_group_dn=dict(required=False, default=None),
+ user_ldap_group_ldap=dict(required=False, default=None),
+ user_ldap_group_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only'], default=None),
+ user_ldap_group_pools=dict(required=False, type='list', elements='str', default=[]),
+ state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent', 'login']),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py
new file mode 100644
index 000000000..bbc2a8d12
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py
@@ -0,0 +1,534 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+"""This module creates, deletes or modifies repositories of users that can log on to an Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_users_repository
+version_added: 2.13.0
+short_description: Create, Delete or Modify repositories of users that can log on to an Infinibox
+description:
+    - This module creates, deletes or modifies repositories of users that can log on to an Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ ad_auto_discover_servers:
+ description:
+ - AD auto discover servers
+ type: bool
+ choices: [true, false]
+ required: false
+ default: true
+ ad_domain_name:
+ description:
+ - AD domain name
+ type: str
+ required: false
+ bind_password:
+ description:
+ - The bind user password
+ type: str
+ required: false
+ bind_username:
+ description:
+ - The bind username
+ type: str
+ required: false
+ servers:
+ description:
+ - A list of LDAP servers. For an empty list, use [].
+ required: false
+ type: list
+ elements: str
+ default: []
+ name:
+ description:
+ - Name of repository
+ type: str
+ required: true
+ ldap_port:
+ description:
+ - LDAP or AD port to use
+ type: int
+ required: false
+ default: 636
+ ldap_servers:
+ description:
+ - List of LDAP or AD servers
+ type: list
+ elements: str
+ required: false
+ default: []
+ repository_type:
+ description:
+ - The type of repository
+ choices: ["ActiveDirectory", "LDAP"]
+ type: str
+ required: False
+ schema_group_memberof_attribute:
+ description:
+ - Schema group memberof attribute
+ type: str
+ required: false
+ schema_group_name_attribute:
+ description:
+ - Schema group name attribute
+ type: str
+ required: false
+ schema_groups_basedn:
+ description:
+ - Schema groups base DN
+ type: str
+ required: false
+ schema_group_class:
+ description:
+ - Schema group class
+ type: str
+ required: false
+ schema_users_basedn:
+ description:
+ - Schema user base DN
+ type: str
+ required: false
+ schema_user_class:
+ description:
+ - Schema user class
+ type: str
+ required: false
+ schema_username_attribute:
+ description:
+ - Schema username attribute
+ type: str
+ required: false
+ state:
+ description:
+ - Creates/Modifies users repositories when present or removes when absent.
+ - When getting the stats for a users repository, the module will test
+ connectivity to the repository and report the result in 'test_ok' as true or false.
+ required: false
+ type: str
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ use_ldaps:
+ description:
+ - Use SSL (LDAPS)
+ type: bool
+ choices: ["true", "false"]
+ default: true
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ bind_password: tuFrAxahuYe4
+ bind_username: conldap
+ ad_domain_name: infinidat.com
+ repository_type: ActiveDirectory
+ schema_group_class: group
+ schema_group_memberof_attribute: memberof
+ schema_group_name_attribute: cn
+ schema_groups_basedn:
+ schema_user_class: user
+ schema_username_attribute: sAMAccountName
+ state: present
+ system: 172.20.67.167
+ user: dohlemacher
+ password: 123456
+
+- name: Stat AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Remove AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
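+
+# The LDAP task below is illustrative only; the repository name, server, bind
+# DN, password and schema attributes are placeholders for a generic
+# OpenLDAP-style directory.
+- name: Create LDAP
+  infini_users_repository:
+    name: PSUS_ANSIBLE_ldap
+    repository_type: LDAP
+    ldap_servers:
+      - ldap.example.com
+    ldap_port: 636
+    use_ldaps: true
+    ad_auto_discover_servers: false
+    bind_username: cn=admin,dc=example,dc=com
+    bind_password: changeme
+    schema_user_class: posixAccount
+    schema_username_attribute: uid
+    schema_group_class: posixGroup
+    schema_group_name_attribute: cn
+    schema_group_memberof_attribute: memberof
+    state: present
+    user: admin
+    password: secret
+    system: ibox001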
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ api_wrapper,
+ get_system,
+ infinibox_argument_spec,
+)
+
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_INFINISDK = False
+
+
+@api_wrapper
+def get_users_repository(module, disable_fail=False):
+ """
+ Find and return users repository information
+    Use disable_fail when we are looking for a users repository
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ name = module.params["name"]
+
+ path = f"config/ldap?name={name}"
+ repo = system.api.get(path=path)
+
+ if repo:
+ result = repo.get_result()
+ if not disable_fail and not result:
+ msg = f"Users repository {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ return result
+
+ if not disable_fail:
+ msg = f"Users repository {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+
+ return None
+
+
+@api_wrapper
+def test_users_repository(module, repository_id, disable_fail=False):
+ """
+    Test connectivity to a users repository, referenced by its ID.
+    Return True when the test succeeds and False when it fails.
+    Use disable_fail to return False instead of failing the module on an API error.
+ """
+ system = get_system(module)
+ name = module.params['name']
+ try:
+ path = f"config/ldap/{repository_id}/test"
+ result = system.api.post(path=path)
+ except APICommandFailed as err:
+ if disable_fail:
+ return False
+ msg = f"Users repository {name} testing failed: {str(err)}"
+ module.fail_json(msg=msg)
+ if result.response.status_code in [200]:
+ return True
+ return False
+
+
+def create_post_data(module):
+ """Create data dict for post rest calls"""
+ name = module.params["name"]
+ repo_type = module.params["repository_type"]
+ # search_order
+ schema_definition = {
+ "group_class": module.params["schema_group_class"],
+ "group_memberof_attribute": module.params["schema_group_memberof_attribute"],
+ "group_name_attribute": module.params["schema_group_name_attribute"],
+ "groups_basedn": module.params["schema_groups_basedn"],
+ "user_class": module.params["schema_user_class"],
+ "username_attribute": module.params["schema_username_attribute"],
+ "users_basedn": module.params["schema_users_basedn"],
+ }
+
+ # Create json data
+ data = {
+ "bind_password": module.params["bind_password"],
+ "bind_username": module.params["bind_username"],
+ "ldap_port": module.params["ldap_port"],
+ "name": name,
+ "repository_type": repo_type,
+ "schema_definition": schema_definition,
+ "use_ldaps": module.params["use_ldaps"],
+ }
+
+ # Add type specific fields to data dict
+ if repo_type == "ActiveDirectory":
+ data["domain_name"] = module.params["ad_domain_name"]
+ data["servers"] = []
+ else: # LDAP
+ data["domain_name"]: None
+ data["servers"] = module.params["ldap_servers"]
+ return data
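+
+# For reference, a sketch of the request body this helper builds for an LDAP
+# repository (all values are placeholders; for ActiveDirectory, "domain_name"
+# is set from ad_domain_name and "servers" is left empty):
+#
+#   {
+#       "name": "example_ldap",
+#       "repository_type": "LDAP",
+#       "bind_username": "cn=admin,dc=example,dc=com",
+#       "bind_password": "********",
+#       "ldap_port": 636,
+#       "use_ldaps": True,
+#       "domain_name": None,
+#       "servers": ["ldap.example.com"],
+#       "schema_definition": {
+#           "user_class": "posixAccount",
+#           "username_attribute": "uid",
+#           "group_class": "posixGroup",
+#           "group_name_attribute": "cn",
+#           "group_memberof_attribute": "memberof",
+#           "groups_basedn": None,
+#           "users_basedn": None,
+#       },
+#   }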
+
+
+@api_wrapper
+def post_users_repository(module):
+ """
+ Create or update users LDAP or AD repo. The changed variable is found elsewhere.
+ Variable 'changed' not returned by design
+ """
+ system = get_system(module)
+ name = module.params["name"]
+ data = create_post_data(module)
+ path = "config/ldap"
+ try:
+ system.api.post(path=path, data=data)
+ except APICommandFailed as err:
+ if err.error_code == "LDAP_NAME_CONFLICT":
+ msg = f"Users repository {name} conflicts."
+ module.fail_json(msg=msg)
+ elif err.error_code == "LDAP_BAD_CREDENTIALS":
+ msg = f"Cannot create users repository {name} due to incorrect LDAP credentials: {err}"
+ module.fail_json(msg=msg)
+ else:
+ msg = f"Cannot create users repository {name}: {err}"
+ module.fail_json(msg=msg)
+
+
+@api_wrapper
+def delete_users_repository(module):
+ """Delete repo."""
+ system = get_system(module)
+ name = module.params['name']
+ changed = False
+ if not module.check_mode:
+ repo = get_users_repository(module, disable_fail=True)
+ if repo and len(repo) == 1:
+ path = f"config/ldap/{repo[0]['id']}"
+ try:
+ system.api.delete(path=path)
+ changed = True
+ except APICommandFailed as err:
+ if err.status_code != 404:
+ msg = f"Deletion of users repository {name} failed: {str(err)}"
+ module.fail_json(msg=msg)
+ return changed
+
+
+def handle_stat(module):
+ """Return users repository stat"""
+ name = module.params['name']
+ repos = get_users_repository(module)
+
+ if len(repos) != 1:
+ msg = f"Users repository {name} not found in repository list {repos}. Cannot stat."
+ module.fail_json(msg=msg)
+
+ result = repos[0]
+ repository_id = result.pop("id")
+ result["msg"] = f"Stats for user repository {name}"
+ result["repository_id"] = repository_id # Rename id to repository_id
+ result["test_ok"] = test_users_repository(module, repository_id=repository_id, disable_fail=True)
+ result["changed"] = False
+ module.exit_json(**result)
+
+
+@api_wrapper
+def is_existing_users_repo_equal_to_desired(module): # pylint: disable=too-many-return-statements,multiple-statements
+ """ Compare two user user repositories. Return a bool. """
+ newdata = create_post_data(module)
+    olddata_list = get_users_repository(module, disable_fail=True)
+    if not olddata_list:
+        return False
+    olddata = olddata_list[0]
+ if olddata['bind_username'] != newdata['bind_username']:
+ return False
+ if olddata['repository_type'] != newdata['repository_type']:
+ return False
+ if olddata['domain_name'] != newdata['domain_name']:
+ return False
+ if olddata['ldap_port'] != newdata['ldap_port']:
+ return False
+ if olddata['name'] != newdata['name']:
+ return False
+ if olddata['schema_definition'] != newdata['schema_definition']:
+ return False
+ if olddata['servers'] != newdata['servers']:
+ return False
+ if olddata['use_ldaps'] != newdata['use_ldaps']:
+ return False
+ return True
+
+
+def handle_present(module):
+ """Make users repository present"""
+ name = module.params['name']
+ changed = False
+ msg = ""
+ if not module.check_mode:
+ old_users_repo = None
+ old_users_repo_result = get_users_repository(module, disable_fail=True)
+ if old_users_repo_result:
+ old_users_repo = old_users_repo_result[0]
+ if is_existing_users_repo_equal_to_desired(module):
+ msg = f"Users repository {name} already exists. No changes required."
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ msg = f"Users repository {name} is being recreated with new settings. "
+ delete_users_repository(module)
+ old_users_repo = None
+ changed = True
+
+ post_users_repository(module)
+
+ new_users_repo = get_users_repository(module)
+ changed = new_users_repo != old_users_repo
+ if changed:
+ if old_users_repo:
+ msg = f"{msg}Users repository {name} updated"
+ else:
+ msg = f"{msg}Users repository {name} created"
+ else:
+ msg = f"Users repository {name} unchanged since the value is the same as the existing users repository"
+ else:
+ msg = f"Users repository {name} unchanged due to check_mode"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ """Make users repository absent"""
+ name = module.params['name']
+ msg = f"Users repository {name} unchanged"
+ changed = False
+ if not module.check_mode:
+ changed = delete_users_repository(module)
+ if changed:
+ msg = f"Users repository {name} removed"
+ else:
+ msg = f"Users repository {name} did not exist so removal was unnecessary"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ # ad_domain_name = module.params["ad_domain_name"]
+ # bind_password = module.params["bind_password"]
+ # bind_username = module.params["bind_username"]
+ # ad_domain_name = module.params["ad_domain_name"]
+ # ldap_servers = module.params["ldap_servers"]
+ name = module.params["name"]
+ # ldap_port = module.params["ldap_port"]
+ repository_type = module.params["repository_type"]
+ # schema_group_memberof_attribute = module.params["schema_group_memberof_attribute"]
+ # schema_group_name_attribute = module.params["schema_group_name_attribute"]
+ # schema_groups_basedn = module.params["schema_groups_basedn"]
+ # schema_user_class = module.params["schema_user_class"]
+ # schema_username_attribute = module.params["schema_username_attribute"]
+ # schema_users_basedn = module.params["schema_users_basedn"]
+ state = module.params["state"]
+
+ if state == "stat":
+ pass
+ elif state == "present":
+ if repository_type:
+ common_params = ["bind_password", "bind_username", "schema_group_class",
+ "schema_group_memberof_attribute", "schema_group_name_attribute",
+ "schema_user_class", "schema_username_attribute",]
+ if repository_type == "LDAP": # Creating an LDAP
+ req_params = common_params
+ missing_params = [param for param in req_params if not is_set_in_params(module, param)]
+ if missing_params:
+ msg = f"Cannot create a new LDAP repository named {name} without providing required parameters: {missing_params}"
+ module.fail_json(msg=msg)
+
+ disallowed_params = ["ad_domain_name", "ad_auto_discover_servers"]
+ error_params = [param for param in disallowed_params if is_set_in_params(module, param)]
+ if error_params:
+ msg = f"Cannot create a new LDAP repository named {name} when providing disallowed parameters: {error_params}"
+ module.fail_json(msg=msg)
+ elif repository_type == "ActiveDirectory":
+ req_params = common_params
+ missing_params = [param for param in req_params if not is_set_in_params(module, param)]
+ if missing_params:
+ msg = f"Cannot create a new LDAP repository named {name} without providing required parameters: {missing_params}"
+ module.fail_json(msg=msg)
+
+ disallowed_params = ["ldap_servers"]
+ error_params = [param for param in disallowed_params if is_set_in_params(module, param)]
+ if error_params:
+ msg = f"Cannot create a new LDAP repository named {name} when providing disallowed parameters: {error_params}"
+ module.fail_json(msg=msg)
+ else:
+ msg = f"Unsupported respository type: {repository_type}"
+ module.fail_json(msg=msg)
+ else:
+ msg = "Cannot create a new users repository without providing a repository_type"
+ module.fail_json(msg=msg)
+ elif state == "absent":
+ pass
+ else:
+ module.fail_json(f"Invalid state '{state}' provided")
+
+
+def is_set_in_params(module, key):
+ """A utility function to test if a module param key is set to a truthy value.
+ Useful in list comprehensions."""
+ is_set = False
+ try:
+ if module.params[key]:
+ is_set = True
+ except KeyError:
+ pass
+ return is_set
+
+
+def main():
+ """Main module function"""
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "ad_auto_discover_servers": {"required": False, "choices": [True, False], "type": "bool", "default": True},
+ "ad_domain_name": {"required": False, "default": None},
+ "bind_password": {"required": False, "default": None, "no_log": True},
+ "bind_username": {"required": False, "default": None},
+ "ldap_servers": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "name": {"required": True},
+ "ldap_port": {"required": False, "type": "int", "default": 636},
+ "repository_type": {"required": False, "choices": ["LDAP", "ActiveDirectory"], "default": None},
+ "schema_group_class": {"required": False, "default": None},
+ "schema_group_memberof_attribute": {"required": False, "default": None},
+ "schema_group_name_attribute": {"required": False, "default": None},
+ "schema_groups_basedn": {"required": False, "default": None},
+ "schema_user_class": {"required": False, "default": None},
+ "schema_username_attribute": {"required": False, "default": None},
+ "schema_users_basedn": {"required": False, "default": None},
+ "servers": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ "use_ldaps": {"required": False, "choices": [True, False], "type": "bool", "default": True},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
index 0c4a579bc..df5b0e756 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
@@ -1,14 +1,18 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+""" A module for managing Infinibox volumes """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: infini_vol
version_added: '2.3.0'
@@ -19,24 +23,34 @@ author: David Ohlemacher (@ohlemacher)
options:
name:
description:
- - Volume Name
- required: true
+ - Volume name.
+ type: str
+ required: false
+ serial:
+ description:
+ - Volume serial number.
+ type: str
+ required: false
parent_volume_name:
description:
- Specify a volume name. This is the volume parent for creating a snapshot. Required if volume_type is snapshot.
+ type: str
required: false
pool:
description:
- Pool that master volume will reside within. Required for creating a master volume, but not a snapshot.
+ type: str
required: false
size:
description:
- Volume size in MB, GB or TB units. Required for creating a master volume, but not a snapshot
+ type: str
required: false
snapshot_lock_expires_at:
description:
- This will cause a snapshot to be locked at the specified date-time.
Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ type: str
required: false
snapshot_lock_only:
description:
@@ -47,6 +61,7 @@ options:
state:
description:
- Creates/Modifies master volume or snapshot when present or removes when absent.
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
@@ -59,12 +74,14 @@ options:
write_protected:
description:
- Specifies if the volume should be write protected. Default will be True for snapshots, False for regular volumes.
+ type: str
required: false
default: "Default"
choices: ["Default", "True", "False"]
volume_type:
description:
- Specifies the volume type, regular volume or snapshot.
+ type: str
required: false
default: master
choices: [ "master", "snapshot" ]
@@ -72,21 +89,23 @@ options:
description:
- Specify true to restore a volume (parent_volume_name) from an existing snapshot specified by the name field.
- State must be set to present and volume_type must be 'snapshot'.
+ type: bool
required: false
default: false
+
extends_documentation_fragment:
- infinibox
requirements:
- capacity
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create new volume named foo under pool named bar
infini_vol:
name: foo
# volume_type: master # Default
size: 1TB
- thin_provision: yes
+ thin_provision: true
pool: bar
state: present
user: admin
@@ -115,25 +134,30 @@ EXAMPLES = r'''
user: admin
password: secret
system: ibox001
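+
+# Illustrative only - the serial number below is a placeholder
+- name: Stat volume by its serial number
+  infini_vol:
+    serial: 742B0F000004E1D7000000000001A3B5
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001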
-'''
+"""
# RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
- ObjectNotFound,
+ check_snapshot_lock_options,
get_pool,
get_system,
+ get_vol_by_sn,
get_volume,
- get_vol_sn,
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+ from infinisdk.core.exceptions import ObjectNotFound
+except ImportError:
+ HAS_INFINISDK = False
HAS_CAPACITY = True
try:
@@ -141,19 +165,10 @@ try:
except ImportError:
HAS_CAPACITY = False
-HAS_ARROW = True
-try:
- import arrow
-except ImportError:
- HAS_ARROW = False
-
-except Exception:
- HAS_INFINISDK = False
-
@api_wrapper
def create_volume(module, system):
- """Create Volume"""
+ """ Create Volume """
changed = False
if not module.check_mode:
if module.params["thin_provision"]:
@@ -179,24 +194,21 @@ def create_volume(module, system):
@api_wrapper
def find_vol_id(module, system, vol_name):
- """
- Find the ID of this vol
- """
- vol_url = "volumes?name={0}&fields=id".format(vol_name)
+ """ Find the ID of this vol """
+ vol_url = f"volumes?name={vol_name}&fields=id"
vol = system.api.get(path=vol_url)
result = vol.get_json()["result"]
if len(result) != 1:
- module.fail_json("Cannot find a volume with name '{0}'".format(vol_name))
+ module.fail_json(f"Cannot find a volume with name '{vol_name}'")
vol_id = result[0]["id"]
- # print("Volume {} has ID {}".format(vol_name, vol_id))
return vol_id
@api_wrapper
def restore_volume_from_snapshot(module, system):
- """Use snapshot to restore a volume"""
+ """ Use snapshot to restore a volume """
changed = False
is_restoring = module.params["restore_volume_from_snapshot"]
volume_type = module.params["volume_type"]
@@ -209,62 +221,62 @@ def restore_volume_from_snapshot(module, system):
if not is_restoring:
raise AssertionError("A programming error occurred. is_restoring is not True")
if volume_type != "snapshot":
- module.fail_json(
- msg="Cannot restore a parent volume from snapshot unless the volume "
- "type is 'snapshot'"
- )
+ module.exit_json(msg="Cannot restore a parent volume from snapshot unless the volume type is 'snapshot'")
if not parent_volume_name:
- module.fail_json(
- msg="Cannot restore a parent volume from snapshot unless the parent "
- "volume name is specified"
- )
+ module.exit_json(msg="Cannot restore a parent volume from snapshot unless the parent volume name is specified")
if not module.check_mode:
- restore_url = "volumes/{0}/restore?approved=true".format(parent_volume_id)
+ restore_url = f"volumes/{parent_volume_id}/restore?approved=true"
restore_data = {
"source_id": snap_id,
}
- restore = system.api.post(path=restore_url, data=restore_data)
- changed = True
+ try:
+ system.api.post(path=restore_url, data=restore_data)
+ changed = True
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot restore volume {parent_volume_name} from {snap_name}: {err}")
return changed
@api_wrapper
def update_volume(module, volume):
- """Update Volume"""
+ """ Update Volume """
changed = False
+
+ if module.check_mode:
+ return changed
+
if module.params["size"]:
size = Capacity(module.params["size"]).roundup(64 * KiB)
if volume.get_size() != size:
- if not module.check_mode:
- volume.update_size(size)
+ volume.update_size(size)
changed = True
if module.params["thin_provision"] is not None:
- type = str(volume.get_provisioning())
- if type == "THICK" and module.params["thin_provision"]:
- if not module.check_mode:
- volume.update_provisioning("THIN")
+ provisioning = str(volume.get_provisioning())
+ if provisioning == "THICK" and module.params["thin_provision"]:
+ volume.update_provisioning("THIN")
changed = True
- if type == "THIN" and not module.params["thin_provision"]:
- if not module.check_mode:
- volume.update_provisioning("THICK")
+ if provisioning == "THIN" and not module.params["thin_provision"]:
+ volume.update_provisioning("THICK")
changed = True
if module.params["write_protected"] is not None:
is_write_prot = volume.is_write_protected()
desired_is_write_prot = module.params["write_protected"]
if is_write_prot != desired_is_write_prot:
volume.update_field("write_protected", desired_is_write_prot)
+ changed = True
return changed
@api_wrapper
def delete_volume(module, volume):
- """ Delete Volume. Volume could be a snapshot."""
+ """ Delete Volume. Volume could be a snapshot. """
+ changed = False
if not module.check_mode:
volume.delete()
- changed = True
- return True
+ changed = True
+ return changed
@api_wrapper
@@ -274,15 +286,11 @@ def create_snapshot(module, system):
parent_volume_name = module.params["parent_volume_name"]
try:
parent_volume = system.volumes.get(name=parent_volume_name)
- except ObjectNotFound as err:
- msg = "Cannot create snapshot {0}. Parent volume {1} not found".format(
- snapshot_name, parent_volume_name
- )
+ except ObjectNotFound:
+ msg = f"Cannot create snapshot {snapshot_name}. Parent volume {parent_volume_name} not found"
module.fail_json(msg=msg)
if not parent_volume:
- msg = "Cannot find new snapshot's parent volume named {0}".format(
- parent_volume_name
- )
+ msg = f"Cannot find new snapshot's parent volume named {parent_volume_name}"
module.fail_json(msg=msg)
if not module.check_mode:
if module.params["snapshot_lock_only"]:
@@ -304,9 +312,7 @@ def create_snapshot(module, system):
@api_wrapper
def update_snapshot(module, snapshot):
- """
- Update/refresh snapshot. May also lock it.
- """
+ """ Update/refresh snapshot. May also lock it. """
refresh_changed = False
if not module.params["snapshot_lock_only"]:
snap_is_locked = snapshot.get_lock_state() == "LOCKED"
@@ -331,88 +337,18 @@ def update_snapshot(module, snapshot):
return refresh_changed or lock_changed
-def get_sys_pool_vol_parname(module):
+def handle_stat(module):
+ """ Handle the stat state """
system = get_system(module)
- pool = get_pool(module, system)
- if module.params["name"]:
+ if module.params['name']:
volume = get_volume(module, system)
else:
- volume = get_vol_sn(module, system)
- parname = module.params["parent_volume_name"]
- return (system, pool, volume, parname)
-
-
-def check_snapshot_lock_options(module):
- """
- Check if specified options are feasible for a snapshot.
-
- Prevent very long lock times.
- max_delta_minutes limits locks to 30 days (43200 minutes).
-
- This functionality is broken out from manage_snapshot_locks() to allow
- it to be called by create_snapshot() before the snapshot is actually
- created.
- """
- snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
-
- if snapshot_lock_expires_at: # Then user has specified wish to lock snap
- lock_expires_at = arrow.get(snapshot_lock_expires_at)
-
- # Check for lock in the past
- now = arrow.utcnow()
- if lock_expires_at <= now:
- msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
- msg += "of '{0}' from the past".format(snapshot_lock_expires_at)
- module.fail_json(msg=msg)
-
- # Check for lock later than max lock, i.e. too far in future.
- max_delta_minutes = 43200 # 30 days in minutes
- max_lock_expires_at = now.shift(minutes=max_delta_minutes)
- if lock_expires_at >= max_lock_expires_at:
- msg = "snapshot_lock_expires_at exceeds {0} days in the future".format(
- max_delta_minutes // 24 // 60
- )
- module.fail_json(msg=msg)
-
-
-def manage_snapshot_locks(module, snapshot):
- """
- Manage the locking of a snapshot. Check for bad lock times.
- See check_snapshot_lock_options() which has additional checks.
- """
- name = module.params["name"]
- snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
- snap_is_locked = snapshot.get_lock_state() == "LOCKED"
- current_lock_expires_at = snapshot.get_lock_expires_at()
- changed = False
-
- check_snapshot_lock_options(module)
-
- if snapshot_lock_expires_at: # Then user has specified wish to lock snap
- lock_expires_at = arrow.get(snapshot_lock_expires_at)
- if snap_is_locked and lock_expires_at < current_lock_expires_at:
- # Lock earlier than current lock
- msg = "snapshot_lock_expires_at '{0}' preceeds the current lock time of '{1}'".format(
- lock_expires_at, current_lock_expires_at
- )
- module.fail_json(msg=msg)
- elif snap_is_locked and lock_expires_at == current_lock_expires_at:
- # Lock already set to correct time
- pass
- else:
- # Set lock
- if not module.check_mode:
- snapshot.update_lock_expires_at(lock_expires_at)
- changed = True
- return changed
-
-
-def handle_stat(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume = get_vol_by_sn(module, system)
if not volume:
- msg = "Volume {0} not found. Cannot stat.".format(module.params["name"])
+ msg = f"Volume {module.params['name']} not found. Cannot stat."
module.fail_json(msg=msg)
fields = volume.get_fields() # from_cache=True, raw_value=True)
+
created_at = str(fields.get("created_at", None))
has_children = fields.get("has_children", None)
lock_expires_at = str(volume.get_lock_expires_at())
@@ -429,7 +365,7 @@ def handle_stat(module):
volume_type = fields.get("type", None)
write_protected = fields.get("write_protected", None)
if volume_type == "SNAPSHOT":
- msg = "Snapshot stat found"
+ msg = "Volume snapshot stat found"
else:
msg = "Volume stat found"
@@ -456,7 +392,12 @@ def handle_stat(module):
def handle_present(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ """ Handle the present state """
+ system = get_system(module)
+ if module.params["name"]:
+ volume = get_volume(module, system)
+ else:
+ volume = get_vol_by_sn(module, system)
volume_type = module.params["volume_type"]
is_restoring = module.params["restore_volume_from_snapshot"]
if volume_type == "master":
@@ -465,7 +406,11 @@ def handle_present(module):
module.exit_json(changed=changed, msg="Volume created")
else:
changed = update_volume(module, volume)
- module.exit_json(changed=changed, msg="Volume updated")
+ if changed:
+ msg = "Volume updated"
+ else:
+ msg = "Volume present. No changes were required"
+ module.exit_json(changed=changed, msg=msg)
elif volume_type == "snapshot":
snapshot = volume
if is_restoring:
@@ -484,7 +429,12 @@ def handle_present(module):
def handle_absent(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ if module.params["name"]:
+ volume = get_volume(module, system)
+ else:
+ volume = get_vol_by_sn(module, system)
volume_type = module.params["volume_type"]
if volume and volume.get_lock_state() == "LOCKED":
@@ -498,10 +448,10 @@ def handle_absent(module):
changed = delete_volume(module, volume)
module.exit_json(changed=changed, msg="Volume removed")
elif volume_type == "snapshot":
- if not volume:
+ snapshot = volume
+ if not snapshot:
module.exit_json(changed=False, msg="Snapshot already absent")
else:
- snapshot = volume
changed = delete_volume(module, snapshot)
module.exit_json(changed=changed, msg="Snapshot removed")
else:
@@ -509,7 +459,7 @@ def handle_absent(module):
def execute_state(module):
- # Handle different write_protected defaults depending on volume_type.
+ """ Handle each state. Handle different write_protected defaults depending on volume_type. """
if module.params["volume_type"] == "snapshot":
if module.params["write_protected"] in ["True", "true", "Default"]:
module.params["write_protected"] = True
@@ -521,8 +471,8 @@ def execute_state(module):
else:
module.params["write_protected"] = True
else:
- msg = f"An error has occurred handling volume_type '{module.params['volume_type']}' or write_protected '{module.params['write_protected']}' values"
- module.fail_json(msg=msg)
+ msg = f"An error has occurred handling volume_type {module.params['volume_type']} or write_protected {module.params['write_protected']} values"
+ module.fail_json(msg)
state = module.params["state"]
try:
@@ -533,9 +483,7 @@ def execute_state(module):
elif state == "absent":
handle_absent(module)
else:
- module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
- )
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
finally:
system = get_system(module)
system.logout()
@@ -543,22 +491,32 @@ def execute_state(module):
def check_options(module):
"""Verify module options are sane"""
+ name = module.params["name"]
+ serial = module.params["serial"]
state = module.params["state"]
size = module.params["size"]
pool = module.params["pool"]
volume_type = module.params["volume_type"]
parent_volume_name = module.params["parent_volume_name"]
+ if state == "stat":
+ if not name and not serial:
+ msg = "Name or serial parameter must be provided"
+ module.fail_json(msg)
+ if state in ["present", "absent"]:
+ if not name:
+ msg = "Name parameter must be provided"
+ module.fail_json(msg=msg)
+
if state == "present":
if volume_type == "master":
- if state == "present":
- if parent_volume_name:
- msg = "parent_volume_name should not be specified "
- msg += "if volume_type is 'volume'. Snapshots only."
- module.fail_json(msg=msg)
- if not size:
- msg = "Size is required to create a volume"
- module.fail_json(msg=msg)
+ if parent_volume_name:
+ msg = "parent_volume_name should not be specified "
+ msg += "if volume_type is 'master'. Used for snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a volume"
+ module.fail_json(msg=msg)
elif volume_type == "snapshot":
if size or pool:
msg = "Neither pool nor size should not be specified "
@@ -572,26 +530,28 @@ def check_options(module):
else:
msg = "A programming error has occurred"
module.fail_json(msg=msg)
+ if not pool and volume_type == "master":
+ msg = "For state 'present', pool is required"
+ module.fail_json(msg=msg)
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name=dict(required=False),
- parent_volume_name=dict(default=None, required=False, type=str),
+ name=dict(required=False, default=None),
+ parent_volume_name=dict(default=None, required=False, type="str"),
pool=dict(required=False),
- size=dict(),
- serial=dict(),
+ restore_volume_from_snapshot=dict(default=False, type="bool"),
+ serial=dict(required=False, default=None),
+ size=dict(required=False, default=None),
snapshot_lock_expires_at=dict(),
- snapshot_lock_only=dict(type="bool", default=False),
+ snapshot_lock_only=dict(default=False, type="bool"),
state=dict(default="present", choices=["stat", "present", "absent"]),
thin_provision=dict(type="bool", default=True),
- write_protected=dict(
- default="Default", choices=["Default", "True", "False"]
- ),
volume_type=dict(default="master", choices=["master", "snapshot"]),
- restore_volume_from_snapshot=dict(default=False, type=bool),
+ write_protected=dict(default="Default", choices=["Default", "True", "False"]),
)
)
@@ -600,16 +560,14 @@ def main():
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib("infinisdk"))
- if not HAS_ARROW:
- module.fail_json(msg=missing_required_lib("arrow"))
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib("capacity"))
if module.params["size"]:
try:
Capacity(module.params["size"])
- except Exception:
- module.fail_json(
- msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units"
- )
+ except Exception: # pylint: disable=broad-exception-caught
+ module.fail_json(msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units")
check_options(module)
execute_state(module)