author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit     a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree       f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/infinidat/infinibox/plugins
parent     Initial commit. (diff)
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/infinidat/infinibox/plugins')
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py | 38
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/filters/psus_filters.py | 28
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py | 216
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/config.py | 114
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/globalnames | bin 0 -> 6 bytes
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/history | bin 0 -> 14 bytes
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/objectdb | bin 0 -> 6 bytes
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py | 264
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py | 264
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py | 203
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py | 185
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py | 355
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py | 287
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py | 376
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py | 332
-rw-r--r--  collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py | 466
18 files changed, 3391 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
new file mode 100644
index 00000000..e0939395
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+class ModuleDocFragment(object):
+
+ # Standard Infinibox documentation fragment
+ DOCUMENTATION = r'''
+options:
+ system:
+ description:
+ - Infinibox Hostname or IPv4 Address.
+ type: str
+ required: true
+ user:
+ description:
+ - Infinibox User username with sufficient privileges (see notes).
+ required: false
+ password:
+ description:
+ - Infinibox User password.
+ type: str
+notes:
+ - This module requires the infinisdk Python library.
+ - You must set the INFINIBOX_USER and INFINIBOX_PASSWORD environment variables
+ if the user and password arguments are not passed to the module directly.
+ - Ansible uses the infinisdk configuration file C(~/.infinidat/infinisdk.ini) if no credentials are provided.
+ See U(http://infinisdk.readthedocs.io/en/latest/getting_started.html)
+ - All Infinidat modules support check mode (--check). However, a dry run that creates
+ resources may fail if the resource dependencies are not met for a task.
+ For example, consider a task that creates a volume in a pool.
+ If the pool does not exist, the volume creation task will fail.
+ It will fail even if an earlier task in the playbook would have created the pool,
+ because in a dry run that pool creation is also skipped.
+requirements:
+ - python2 >= 2.7 or python3 >= 3.6
+ - infinisdk (https://infinisdk.readthedocs.io/en/latest/)
+'''
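
The notes above describe a three-level credential fallback: explicit module arguments, then the INFINIBOX_USER/INFINIBOX_PASSWORD environment variables, then ~/.infinidat/infinisdk.ini. A rough, illustrative Python sketch of that order follows; the helper name is hypothetical, and the authoritative implementation is get_system() in module_utils/infinibox.py further down in this diff.

import os

def resolve_credentials(module_user, module_password):
    # Hypothetical helper, for illustration only.
    # 1. Explicit module arguments take precedence.
    if module_user and module_password:
        return (module_user, module_password)
    # 2. Otherwise fall back to the documented environment variables.
    env_user = os.environ.get('INFINIBOX_USER')
    env_password = os.environ.get('INFINIBOX_PASSWORD')
    if env_user and env_password:
        return (env_user, env_password)
    # 3. Otherwise infinisdk reads ~/.infinidat/infinisdk.ini on its own.
    return None
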
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/filters/psus_filters.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/filters/psus_filters.py
new file mode 100644
index 00000000..adcefbe1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/filters/psus_filters.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.errors import AnsibleError
+import datetime
+
+def delta_time(dt, **kwargs):
+ """
+ Add a time delta, built from the keyword arguments, to the given datetime.
+ Ref: https://docs.python.org/3.6/library/datetime.html#timedelta-objects
+ """
+ return dt + datetime.timedelta(**kwargs)
+
+class FilterModule(object):
+ """
+ A filter lookup class for custom filter plugins.
+ Ref: https://www.dasblinkenlichten.com/creating-ansible-filter-plugins/
+ """
+ def filters(self):
+ """
+ Return the mapping of filter names to filter functions.
+ """
+ return self.filter_map
+
+ filter_map = {
+ 'delta_time': delta_time,
+ }
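
As a quick usage sketch, delta_time simply forwards its keyword arguments to datetime.timedelta. Calling it directly from Python (this assumes psus_filters.py is on the import path; in a playbook it would be applied as a Jinja2 filter instead):

import datetime

from psus_filters import delta_time  # assumes the file is importable for illustration

start = datetime.datetime(2020, 1, 1, 12, 0, 0)
print(delta_time(start, days=7, hours=3))  # 2020-01-08 15:00:00
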
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
new file mode 100644
index 00000000..2ec4bb1e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+HAS_INFINISDK = True
+try:
+ from infinisdk import InfiniBox, core
+ from infinisdk.core.exceptions import ObjectNotFound
+except ImportError:
+ HAS_INFINISDK = False
+
+from functools import wraps
+from os import environ
+from os import path
+from datetime import datetime
+
+
+def unixMillisecondsToDate(unix_ms):
+ return (datetime.utcfromtimestamp(unix_ms/1000.), 'UTC')
+
+
+def api_wrapper(func):
+ """ Catch API Errors Decorator"""
+ @wraps(func)
+ def __wrapper(*args, **kwargs):
+ module = args[0]
+ try:
+ return func(*args, **kwargs)
+ except core.exceptions.APICommandException as e:
+ module.fail_json(msg=e.message)
+ except core.exceptions.SystemNotFoundException as e:
+ module.fail_json(msg=e.message)
+ except Exception:
+ raise
+ return __wrapper
+
+
+@api_wrapper
+def get_system(module):
+ """Return System Object or Fail"""
+ box = module.params['system']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+
+ if user and password:
+ system = InfiniBox(box, auth=(user, password), use_ssl=True)
+ elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
+ system = InfiniBox(box,
+ auth=(environ.get('INFINIBOX_USER'),
+ environ.get('INFINIBOX_PASSWORD')),
+ use_ssl=True)
+ elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
+ system = InfiniBox(box, use_ssl=True)
+ else:
+ module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
+
+ try:
+ system.login()
+ except Exception:
+ module.fail_json(msg="Infinibox authentication failed. Check your credentials")
+ return system
+
+
+def infinibox_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+ return dict(
+ system=dict(required=True),
+ user=dict(),
+ password=dict(no_log=True),
+ )
+
+
+def infinibox_required_together():
+ """Return the default list used for the required_together argument to AnsibleModule"""
+ return [['user', 'password']]
+
+
+def merge_two_dicts(dict1, dict2):
+ """
+ Merge two dicts into one and return.
+ result = {**dict1, **dict2} only works in py3.5+.
+ """
+ result = dict1.copy()
+ result.update(dict2)
+ return result
+
+
+@api_wrapper
+def get_pool(module, system):
+ """
+ Return Pool. Try key look up using 'pool', or if that fails, 'name'.
+ If the pool is not found, return None.
+ """
+ try:
+ try:
+ name = module.params['pool']
+ except KeyError:
+ name = module.params['name']
+ return system.pools.get(name=name)
+ except Exception:
+ return None
+
+
+@api_wrapper
+def get_filesystem(module, system):
+ """Return Filesystem or None"""
+ try:
+ try:
+ filesystem = system.filesystems.get(name=module.params['filesystem'])
+ except KeyError:
+ filesystem = system.filesystems.get(name=module.params['name'])
+ return filesystem
+ except Exception:
+ return None
+
+
+@api_wrapper
+def get_export(module, system):
+ """Return export if found or None if not found"""
+ try:
+ try:
+ export_name = module.params['export']
+ except KeyError:
+ export_name = module.params['name']
+
+ export = system.exports.get(export_path=export_name)
+ except ObjectNotFound as err:
+ return None
+
+ return export
+
+
+@api_wrapper
+def get_volume(module, system):
+ """Return Volume or None"""
+ try:
+ try:
+ volume = system.volumes.get(name=module.params['name'])
+ except KeyError:
+ volume = system.volumes.get(name=module.params['volume'])
+ return volume
+ except Exception:
+ return None
+
+
+@api_wrapper
+def get_host(module, system):
+ """Find a host by the name specified in the module"""
+ host = None
+
+ for a_host in system.hosts.to_list():
+ a_host_name = a_host.get_name()
+ try:
+ host_param = module.params['name']
+ except KeyError:
+ host_param = module.params['host']
+
+ if a_host_name == host_param:
+ host = a_host
+ break
+ return host
+
+
+@api_wrapper
+def get_cluster(module, system):
+ """Find a cluster by the name specified in the module"""
+ cluster = None
+ #print("dir:", dir(system))
+
+ for a_cluster in system.host_clusters.to_list():
+ a_cluster_name = a_cluster.get_name()
+ cluster_param = module.params['name']
+
+ if a_cluster_name == cluster_param:
+ cluster = a_cluster
+ break
+ return cluster
+
+
+@api_wrapper
+def get_user(module, system):
+ """Find a user by the user_name specified in the module"""
+ user = None
+ user_name = module.params['user_name']
+ try:
+ user = system.users.get(name=user_name)
+ except ObjectNotFound:
+ pass
+ return user
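
A minimal, hypothetical module skeleton showing how the helpers above are typically combined; the do_something function and the extra name option are illustrative, not part of the collection.

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.infinibox import (
    HAS_INFINISDK, api_wrapper, infinibox_argument_spec,
    infinibox_required_together, get_system,
)


@api_wrapper
def do_something(module, system):
    # Any infinisdk APICommandException raised here is converted into
    # module.fail_json() by the api_wrapper decorator.
    return system.pools.to_list()


def main():
    argument_spec = infinibox_argument_spec()  # shared system/user/password options
    argument_spec.update(dict(name=dict(required=True)))
    module = AnsibleModule(argument_spec,
                           required_together=infinibox_required_together(),
                           supports_check_mode=True)
    if not HAS_INFINISDK:
        module.fail_json(msg=missing_required_lib('infinisdk'))
    system = get_system(module)  # logs in, or fails the task with a clear message
    do_something(module, system)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
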
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/config.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/config.py
new file mode 100644
index 00000000..dee2d1ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/config.py
@@ -0,0 +1,114 @@
+# The default ``config.py``
+# flake8: noqa
+
+
+def set_prefs(prefs):
+ """This function is called before opening the project"""
+
+ # Specify which files and folders to ignore in the project.
+ # Changes to ignored resources are not added to the history and
+ # VCSs. Also they are not returned in `Project.get_files()`.
+ # Note that ``?`` and ``*`` match all characters but slashes.
+ # '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
+ # 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
+ # '.svn': matches 'pkg/.svn' and all of its children
+ # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
+ # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
+ prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
+ '.hg', '.svn', '_svn', '.git', '.tox']
+
+ # Specifies which files should be considered python files. It is
+ # useful when you have scripts inside your project. Only files
+ # ending with ``.py`` are considered to be python files by
+ # default.
+ # prefs['python_files'] = ['*.py']
+
+ # Custom source folders: By default rope searches the project
+ # for finding source folders (folders that should be searched
+ # for finding modules). You can add paths to that list. Note
+ # that rope guesses project source folders correctly most of the
+ # time; use this if you have any problems.
+ # The folders should be relative to project root and use '/' for
+ # separating folders regardless of the platform rope is running on.
+ # 'src/my_source_folder' for instance.
+ # prefs.add('source_folders', 'src')
+
+ # You can extend python path for looking up modules
+ # prefs.add('python_path', '~/python/')
+
+ # Should rope save object information or not.
+ prefs['save_objectdb'] = True
+ prefs['compress_objectdb'] = False
+
+ # If `True`, rope analyzes each module when it is being saved.
+ prefs['automatic_soa'] = True
+ # The depth of calls to follow in static object analysis
+ prefs['soa_followed_calls'] = 0
+
+ # If `False` when running modules or unit tests "dynamic object
+ # analysis" is turned off. This makes them much faster.
+ prefs['perform_doa'] = True
+
+ # Rope can check the validity of its object DB when running.
+ prefs['validate_objectdb'] = True
+
+ # How many undos to hold?
+ prefs['max_history_items'] = 32
+
+ # Shows whether to save history across sessions.
+ prefs['save_history'] = True
+ prefs['compress_history'] = False
+
+ # Set the number spaces used for indenting. According to
+ # :PEP:`8`, it is best to use 4 spaces. Since most of rope's
+ # unit-tests use 4 spaces it is more reliable, too.
+ prefs['indent_size'] = 4
+
+ # Builtin and c-extension modules that are allowed to be imported
+ # and inspected by rope.
+ prefs['extension_modules'] = []
+
+ # Add all standard c-extensions to extension_modules list.
+ prefs['import_dynload_stdmods'] = True
+
+ # If `True` modules with syntax errors are considered to be empty.
+ # The default value is `False`; When `False` syntax errors raise
+ # `rope.base.exceptions.ModuleSyntaxError` exception.
+ prefs['ignore_syntax_errors'] = False
+
+ # If `True`, rope ignores unresolvable imports. Otherwise, they
+ # appear in the importing namespace.
+ prefs['ignore_bad_imports'] = False
+
+ # If `True`, rope will insert new module imports as
+ # `from <package> import <module>` by default.
+ prefs['prefer_module_from_imports'] = False
+
+ # If `True`, rope will transform a comma list of imports into
+ # multiple separate import statements when organizing
+ # imports.
+ prefs['split_imports'] = False
+
+ # If `True`, rope will remove all top-level import statements and
+ # reinsert them at the top of the module when making changes.
+ prefs['pull_imports_to_top'] = True
+
+ # If `True`, rope will sort imports alphabetically by module name instead
+ # of alphabetically by import statement, with from imports after normal
+ # imports.
+ prefs['sort_imports_alphabetically'] = False
+
+ # Location of implementation of
+ # rope.base.oi.type_hinting.interfaces.ITypeHintingFactory In general
+ # case, you don't have to change this value, unless you're an rope expert.
+ # Change this value to inject you own implementations of interfaces
+ # listed in module rope.base.oi.type_hinting.providers.interfaces
+ # For example, you can add you own providers for Django Models, or disable
+ # the search type-hinting in a class hierarchy, etc.
+ prefs['type_hinting_factory'] = (
+ 'rope.base.oi.type_hinting.factory.default_type_hinting_factory')
+
+
+def project_opened(project):
+ """This function is called after opening the project"""
+ # Do whatever you like here!
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/globalnames b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/globalnames
new file mode 100644
index 00000000..0a47446c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/globalnames
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/history b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/history
new file mode 100644
index 00000000..4490a5d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/history
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/objectdb b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/objectdb
new file mode 100644
index 00000000..0a47446c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/.ropeproject/objectdb
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
new file mode 100644
index 00000000..1b7e363b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_cluster
+version_added: 2.9
+short_description: Create, Delete and Modify Host Cluster on Infinibox
+description:
+ - This module creates, deletes or modifies host clusters on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Cluster Name
+ required: true
+ state:
+ description:
+ - Creates/Modifies Cluster when present, removes when absent, or provides
+ details of a cluster when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new cluster
+ infini_cluster:
+ name: foo_cluster
+ user: admin
+ password: secret
+ system: ibox001
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_cluster, unixMillisecondsToDate, merge_two_dicts
+
+
+@api_wrapper
+def get_host_by_name(system, host_name):
+ """Find a host by the name specified in the module"""
+ host = None
+
+ for a_host in system.hosts.to_list():
+ a_host_name = a_host.get_name()
+ if a_host_name == host_name:
+ host = a_host
+ break
+ return host
+
+
+@api_wrapper
+def create_cluster(module, system):
+ print("create cluster")
+ changed = True
+ if not module.check_mode:
+ cluster = system.host_clusters.create(name=module.params['name'])
+ cluster_hosts = module.params['cluster_hosts']
+ for cluster_host in cluster_hosts:
+ if cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, cluster_host['host_name'])
+ cluster.add_host(host)
+ print("Added host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ else:
+ print("Skipped adding (absent) host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ return changed
+
+
+@api_wrapper
+def update_cluster(module, system, cluster):
+ print("update cluster")
+ changed = False
+
+ # e.g. of one host dict found in the module.params['cluster_hosts'] list:
+ # {host_name: <'some_name'>, host_cluster_state: <'present' or 'absent'>}
+ module_cluster_hosts = module.params['cluster_hosts']
+ current_cluster_hosts_names = [host.get_name() for host in cluster.get_field('hosts')]
+ print("current_cluster_hosts_names:", current_cluster_hosts_names)
+ for module_cluster_host in module_cluster_hosts:
+ module_cluster_host_name = module_cluster_host['host_name']
+ print("module_cluster_host_name:", module_cluster_host_name)
+ # Need to add host to cluster?
+ if module_cluster_host_name not in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = 'Cannot find host {0} to add to cluster {1}'.format(
+ module_cluster_host_name,
+ cluster.get_name(),
+ )
+ module.fail_json(msg=msg)
+ cluster.add_host(host)
+ print("Added host {0} to cluster {1}".format(host.get_name(), cluster.get_name()))
+ changed = True
+ # Need to remove host from cluster?
+ elif module_cluster_host_name in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'absent':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = 'Cannot find host {0} to remove from cluster {1}'.format(
+ module_cluster_host_name,
+ cluster.get_name(),
+ )
+ module.fail_json(msg=msg)
+ cluster.remove_host(host)
+ print("Removed host {0} from cluster {1}".format(host.get_name(), cluster.get_name()))
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_cluster(module, cluster):
+ assert cluster, "Cluster not found"
+ changed = True
+ if not module.check_mode:
+ cluster.delete()
+ return changed
+
+
+def get_sys_cluster(module):
+ system = get_system(module)
+ cluster = get_cluster(module, system)
+ return (system, cluster)
+
+
+def get_cluster_fields(cluster):
+ fields = cluster.get_fields(from_cache=True, raw_value=True)
+ created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
+ field_dict = dict(
+ hosts=[],
+ id=cluster.id,
+ created_at=created_at,
+ created_at_timezone=created_at_timezone,
+ )
+ hosts = cluster.get_hosts()
+ for host in hosts:
+ host_dict = {
+ 'host_id': host.id,
+ 'host_name': host.get_name(),
+ }
+ field_dict['hosts'].append(host_dict)
+ return field_dict
+
+
+def handle_stat(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ module.fail_json(msg='Cluster {0} not found'.format(cluster_name))
+ field_dict = get_cluster_fields(cluster)
+ result = dict(
+ changed=False,
+ msg='Cluster stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ changed = create_cluster(module, system)
+ msg='Cluster {0} created'.format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = update_cluster(module, system, cluster)
+ if changed:
+ msg='Cluster {0} updated'.format(cluster_name)
+ else:
+ msg='Cluster {0} required no changes'.format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, cluster = get_sys_cluster(module)
+ cluster_name = module.params["name"]
+ if not cluster:
+ changed = False
+ msg="Cluster {0} already absent".format(cluster_name)
+ else:
+ changed = delete_cluster(module, cluster)
+ msg="Cluster {0} removed".format(cluster_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ state = module.params['state']
+ if state == 'present':
+ if module.params['cluster_hosts'] is None:
+ module.fail_json(msg='Option cluster_hosts, a list, must be provided')
+
+ cluster_hosts = module.params['cluster_hosts']
+ for host in cluster_hosts:
+ try:
+ # Check host has required keys
+ valid_keys = ['host_name', 'host_cluster_state']
+ for valid_key in valid_keys:
+ _ = host[valid_key]
+ # Check host has no unknown keys
+ if len(host.keys()) != len(valid_keys):
+ raise KeyError
+ except KeyError:
+ msg = 'With state present, all cluster_hosts ' \
+ + 'require host_name and host_cluster_state key:values ' \
+ + 'and no others'
+ module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ cluster_hosts=dict(required=False, type='list'),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
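
For reference, check_options() above enforces that every cluster_hosts entry carries exactly a host_name and a host_cluster_state key. An illustrative value (host names are hypothetical):

# Hypothetical host names; each entry must have exactly these two keys.
cluster_hosts = [
    {'host_name': 'host01.example.com', 'host_cluster_state': 'present'},
    {'host_name': 'host02.example.com', 'host_cluster_state': 'absent'},
]
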
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
new file mode 100644
index 00000000..641242a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat(info@infinidat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_export
+version_added: 2.3
+short_description: Create, Delete or Modify NFS Exports on Infinibox
+description:
+ - This module creates, deletes or modifies NFS exports on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ name:
+ description:
+ - Export name. Must start with a forward slash, e.g. name=/data.
+ required: true
+ state:
+ description:
+ - Creates/Modifies export when present, removes when absent, or provides
+ export details with stat.
+ required: false
+ default: "present"
+ choices: [ "stat", "present", "absent" ]
+ client_list:
+ description:
+ - List of dictionaries with client entries. See examples.
+ Check infini_export_client module to modify individual NFS client entries for export.
+ default: "All Hosts(*), RW, no_root_squash: True"
+ required: false
+ filesystem:
+ description:
+ - Name of exported file system.
+ required: true
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - munch
+'''
+
+EXAMPLES = r'''
+- name: Export file system foo as /data01
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Get status of export /data01 of file system foo
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Remove export /data01 of file system foo
+ infini_export:
+ name: /data01
+ filesystem: foo
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Export and specify client list explicitly
+ infini_export:
+ name: /data02
+ filesystem: foo
+ client_list:
+ - client: 192.168.0.2
+ access: RW
+ no_root_squash: True
+ - client: 192.168.0.100
+ access: RO
+ no_root_squash: False
+ - client: 192.168.0.10-192.168.0.20
+ access: RO
+ no_root_squash: False
+ system: ibox001
+ user: admin
+ password: secret
+'''
+
+# RETURN = r''' # '''
+import traceback
+
+MUNCH_IMP_ERR = None
+try:
+ from munch import unmunchify
+ HAS_MUNCH = True
+except ImportError:
+ MUNCH_IMP_ERR = traceback.format_exc()
+ HAS_MUNCH = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_filesystem, get_export, merge_two_dicts
+
+
+def transform(d):
+ return frozenset(d.items())
+
+
+def create_export(module, export, filesystem, system):
+ """ Create new filesystem or update existing one"""
+ assert not export
+ changed = False
+
+ name = module.params['name']
+ client_list = module.params['client_list']
+
+ if not module.check_mode:
+ export = system.exports.create(export_path=name, filesystem=filesystem)
+ if client_list:
+ export.update_permissions(client_list)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_export(module, export, filesystem, system):
+ """ Create new filesystem or update existing one"""
+ assert export
+ changed = False
+
+ name = module.params['name']
+ client_list = module.params['client_list']
+
+ if client_list:
+ if set(map(transform, unmunchify(export.get_permissions()))) \
+ != set(map(transform, client_list)):
+ if not module.check_mode:
+ export.update_permissions(client_list)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_export(module, export):
+ """ Delete file system"""
+ if not module.check_mode:
+ export.delete()
+ changed = True
+ return changed
+
+
+def get_sys_exp_fs(module):
+ system = get_system(module)
+ filesystem = get_filesystem(module, system)
+ export = get_export(module, system)
+ return (system, export, filesystem)
+
+
+def get_export_fields(export):
+ fields = export.get_fields() #from_cache=True, raw_value=True)
+ export_id = fields.get('id', None)
+ permissions = fields.get('permissions', None)
+ enabled = fields.get('enabled', None)
+ field_dict = dict(
+ id=export_id,
+ permissions=permissions,
+ enabled=enabled,
+ )
+ return field_dict
+
+
+def handle_stat(module):
+ """
+ Gather stats on export and return. Changed is always False.
+ """
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not export:
+ module.fail_json(msg='Export "{}" of file system "{}" not found'.format(
+ module.params['name'],
+ module.params['filesystem'],
+ ))
+
+ field_dict = get_export_fields(export)
+ result = dict(
+ changed=False,
+ msg='Export stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not filesystem:
+ module.fail_json(msg='File system {0} not found'.format(module.params['filesystem']))
+ elif not export:
+ changed = create_export(module, export, filesystem, system)
+ module.exit_json(changed=changed, msg="File system export created")
+ else:
+ changed = update_export(module, export, filesystem, system)
+ module.exit_json(changed=changed, msg="File system export updated")
+
+
+def handle_absent(module):
+ system, export, filesystem = get_sys_exp_fs(module)
+ if not export:
+ changed = False
+ msg="Export of {0} already absent".format(module.params['filesystem'])
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = delete_export(module, export)
+ msg="Export of {0} deleted".format(module.params['filesystem'])
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ filesystem=dict(required=True),
+ client_list=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+ if not HAS_MUNCH:
+ module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
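
The transform() helper above makes the client-list comparison in update_export() order-insensitive: each permission dict is frozen into a hashable frozenset of its items so the current and desired lists can be compared as sets. A small standalone sketch:

def transform(d):
    return frozenset(d.items())


current = [{'client': '192.168.0.2', 'access': 'RW', 'no_root_squash': True}]
desired = [{'client': '192.168.0.2', 'no_root_squash': True, 'access': 'RW'}]

# Same entries in a different key order compare equal, so no update is issued.
print(set(map(transform, current)) == set(map(transform, desired)))  # True
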
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
new file mode 100644
index 00000000..fa2d7a5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_export_client
+version_added: 2.3
+short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
+description:
+ - This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ client:
+ description:
+ - Client IP or range. Ranges can be defined as follows:
+ 192.168.0.1-192.168.0.254.
+ aliases: ['name']
+ required: true
+ state:
+ description:
+ - Creates/Modifies client when present, removes when absent, or provides details of a client when stat.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+ access_mode:
+ description:
+ - Read Write or Read Only Access.
+ choices: [ "RW", "RO" ]
+ default: RW
+ required: false
+ no_root_squash:
+ description:
+ - Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
+ type: bool
+ default: no
+ required: false
+ export:
+ description:
+ - Name of the export.
+ required: true
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - munch
+'''
+
+EXAMPLES = r'''
+- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
+ infini_export_client:
+ client: 10.0.0.1
+ access_mode: RW
+ no_root_squash: yes
+ export: /data
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Add multiple clients with RO access. Squash root privileges
+ infini_export_client:
+ client: "{{ item }}"
+ access_mode: RO
+ no_root_squash: no
+ export: /data
+ user: admin
+ password: secret
+ system: ibox001
+ with_items:
+ - 10.0.0.2
+ - 10.0.0.3
+'''
+
+# RETURN = r''' # '''
+
+import traceback
+
+MUNCH_IMP_ERR = None
+try:
+ from munch import Munch, unmunchify
+ HAS_MUNCH = True
+except ImportError:
+ MUNCH_IMP_ERR = traceback.format_exc()
+ HAS_MUNCH = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_export, merge_two_dicts
+
+
+@api_wrapper
+def update_client(module, export):
+ """
+ Update export client list. Note that this will replace existing clients.
+ """
+
+ changed = False
+
+ client = module.params['client']
+ access_mode = module.params['access_mode']
+ no_root_squash = module.params['no_root_squash']
+
+ client_list = export.get_permissions()
+ client_not_in_list = True
+
+ for item in client_list:
+ if item.client == client: # Update client
+ client_not_in_list = False
+ if item.access != access_mode:
+ item.access = access_mode
+ changed = True
+ if item.no_root_squash is not no_root_squash:
+ item.no_root_squash = no_root_squash
+ changed = True
+
+ # If access_mode and/or no_root_squash not passed as arguments to the module,
+ # use access_mode with RW value and set no_root_squash to False
+ if client_not_in_list: # Create client
+ changed = True
+ client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
+
+ if changed:
+ for index, item in enumerate(client_list):
+ client_list[index] = unmunchify(item)
+ if not module.check_mode:
+ export.update_permissions(client_list)
+
+ return changed
+
+@api_wrapper
+def delete_client(module, export):
+ """Update export client list"""
+ if export is None and module.params['state'] == 'absent':
+ module.exit_json(changed=False)
+
+ changed = False
+
+ client = module.params['client']
+ client_list = export.get_permissions()
+
+ for index, item in enumerate(client_list):
+ if item.client == client:
+ changed = True
+ del client_list[index]
+
+ if changed:
+ for index, item in enumerate(client_list):
+ client_list[index] = unmunchify(item)
+ if not module.check_mode:
+ export.update_permissions(client_list)
+
+ return changed
+
+
+def get_sys_exp(module):
+ system = get_system(module)
+ export = get_export(module, system)
+ return (system, export)
+
+
+def get_export_client_fields(export, client_name):
+ fields = export.get_fields() #from_cache=True, raw_value=True)
+ permissions = fields.get('permissions', None)
+ #field_dict = {}
+ for munched_perm in permissions:
+ perm = unmunchify(munched_perm)
+ if perm['client'] == client_name: # Found client
+ field_dict = dict(
+ access_mode=perm['access'],
+ no_root_squash=perm['no_root_squash'],
+ )
+ return field_dict
+ assert False, "No client match to exports found"
+
+
+def handle_stat(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ module.fail_json(msg='Export {0} not found'.format(module.params['export']))
+ client_name = module.params['client']
+ field_dict = get_export_client_fields(export, client_name)
+ result = dict(
+ changed=False,
+ msg='Export client stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ msg='Export {0} not found'.format(module.params['export'])
+ module.fail_json(msg=msg)
+
+ changed = update_client(module, export)
+ msg="Export client updated"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, export = get_sys_exp(module)
+ if not export:
+ changed = False
+ msg="Export client already absent"
+ module.exit_json(changed=False, msg=msg)
+ else:
+ changed = delete_client(module, export)
+ msg="Export client removed"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ client=dict(required=True),
+ access_mode=dict(choices=['RO', 'RW'], default='RW'),
+ no_root_squash=dict(type='bool', default='no'),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ export=dict(required=True)
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+ if not HAS_MUNCH:
+ module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
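
update_client() and delete_client() above operate on the Munch objects returned by export.get_permissions() and convert them back to plain dicts before calling export.update_permissions(). A rough sketch of that round trip, assuming the munch package is installed:

from munch import Munch, unmunchify

entry = Munch(client='10.0.0.1', access='RW', no_root_squash=True)
print(entry.access)       # attribute-style access on the permission entry: 'RW'
print(unmunchify(entry))  # plain dict, suitable for export.update_permissions()
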
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
new file mode 100644
index 00000000..0c365fce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_fs
+version_added: 2.3
+short_description: Create, Delete or Modify filesystems on Infinibox
+description:
+ - This module creates, deletes or modifies filesystems on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ name:
+ description:
+ - File system name.
+ required: true
+ state:
+ description:
+ - Creates/Modifies file system when present, removes when absent, or provides details of a file system when stat.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - File system size in MB, GB or TB units. See examples.
+ required: false
+ pool:
+ description:
+ - Pool that will host file system.
+ required: true
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Create new file system named foo under pool named bar
+ infini_fs:
+ name: foo
+ size: 1TB
+ pool: bar
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+'''
+
+# RETURN = r''' # '''
+
+import traceback
+
+CAPACITY_IMP_ERR = None
+try:
+ from capacity import KiB, Capacity
+ HAS_CAPACITY = True
+except ImportError:
+ CAPACITY_IMP_ERR = traceback.format_exc()
+ HAS_CAPACITY = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_pool, get_system, get_filesystem
+
+
+@api_wrapper
+def create_filesystem(module, system):
+ """Create Filesystem"""
+ changed = True
+ if not module.check_mode:
+ filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ filesystem.update_size(size)
+ return changed
+
+
+@api_wrapper
+def update_filesystem(module, filesystem):
+ """Update Filesystem"""
+ changed = False
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ if filesystem.get_size() != size:
+ if not module.check_mode:
+ filesystem.update_size(size)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_filesystem(module, filesystem):
+ """ Delete Filesystem"""
+ changed = True
+ if not module.check_mode:
+ filesystem.delete()
+ return changed
+
+
+def get_sys_pool_fs(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ filesystem = get_filesystem(module, system)
+ return (system, pool, filesystem)
+
+
+def handle_stat(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ if not filesystem:
+ module.fail_json(msg='File system {0} not found'.format(module.params['name']))
+ fields = filesystem.get_fields() #from_cache=True, raw_value=True)
+ used = fields.get('used_size', None)
+ filesystem_id = fields.get('id', None)
+
+ result = dict(
+ changed=False,
+ size=str(filesystem.get_size()),
+ used=str(used),
+ id=filesystem_id,
+ msg='File system stat found'
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ if not filesystem:
+ changed = create_filesystem(module, system)
+ module.exit_json(changed=changed, msg="File system created")
+ else:
+ changed = update_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system updated")
+
+
+def handle_absent(module):
+ system, pool, filesystem = get_sys_pool_fs(module)
+ if not pool or not filesystem:
+ module.exit_json(changed=False, msg="File system already absent")
+ else:
+ changed = delete_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ pool=dict(required=True),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
+
+ if module.params['size']:
+ try:
+ Capacity(module.params['size'])
+ except Exception:
+ module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
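
The size handling above parses human-readable sizes with the capacity package and rounds them up to a 64 KiB granularity before calling update_size(). A small sketch, assuming capacity is installed:

from capacity import KiB, Capacity

# '1TB' mirrors the size strings accepted by the module's size option.
size = Capacity('1TB').roundup(64 * KiB)
print(size)
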
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
new file mode 100644
index 00000000..d17c78ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+from infi.dtypes.iqn import make_iscsi_name
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_host
+version_added: 2.3
+short_description: Create, Delete and Modify Hosts on Infinibox
+description:
+ - This module creates, deletes or modifies hosts on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ name:
+ description:
+ - Host Name
+ required: true
+ state:
+ description:
+ - Creates/Modifies Host when present, removes when absent, or provides details of a host when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new host
+ infini_host:
+ name: foo.example.com
+ user: admin
+ password: secret
+ system: ibox001
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_host, unixMillisecondsToDate, merge_two_dicts
+
+
+@api_wrapper
+def create_host(module, system):
+
+ changed = True
+
+ if not module.check_mode:
+ host = system.hosts.create(name=module.params['name'])
+ return changed
+
+@api_wrapper
+def update_host(module, host):
+ changed = False
+ return changed
+
+@api_wrapper
+def delete_host(module, host):
+ changed = True
+ if not module.check_mode:
+ # May raise APICommandFailed if mapped, etc.
+ host.delete()
+ return changed
+
+
+def get_sys_host(module):
+ system = get_system(module)
+ host = get_host(module, system)
+ return (system, host)
+
+
+def get_host_fields(host):
+ fields = host.get_fields(from_cache=True, raw_value=True)
+ created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
+ field_dict = dict(
+ created_at=created_at,
+ created_at_timezone=created_at_timezone,
+ id=host.id,
+ iqns=[],
+ luns=[],
+ ports=[],
+ wwns=[],
+ )
+ luns = host.get_luns()
+ for lun in luns:
+ field_dict['luns'].append({'lun_id': lun.id,
+ 'lun_volume_id': lun.volume.id,
+ 'lun_volume_name': lun.volume.get_name(),
+ })
+ ports = host.get_ports()
+ for port in ports:
+ if str(type(port)) == "<class 'infi.dtypes.wwn.WWN'>":
+ field_dict['wwns'].append(str(port))
+ if str(type(port)) == "<class 'infi.dtypes.iqn.IQN'>":
+ field_dict['iqns'].append(str(port))
+ return field_dict
+
+
+def handle_stat(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ module.fail_json(msg='Host {0} not found'.format(host_name))
+ field_dict = get_host_fields(host)
+ result = dict(
+ changed=False,
+ msg='Host stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ changed = create_host(module, system)
+ msg='Host {0} created'.format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ changed = update_host(module, host)
+ msg='Host {0} updated'.format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, host = get_sys_host(module)
+ host_name = module.params["name"]
+ if not host:
+ msg="Host {0} already absent".format(host_name)
+ module.exit_json(changed=False, msg=msg)
+ else:
+ changed = delete_host(module, host)
+ msg="Host {0} removed".format(host_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
new file mode 100644
index 00000000..9163b29f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+from infinisdk.core.exceptions import APICommandFailed
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_map
+version_added: '2.10'
+short_description: Create and Delete mapping of a volume to a host on Infinibox
+description:
+ - This module creates or deletes mappings of volumes to hosts on
+ Infinibox. infini_map is implemented separately from infini_host to allow
+ Ansible plays to remove, or make absent, a mapping without removing the host.
+author: David Ohlemacher (@ohlemacher)
+options:
+ host:
+ description:
+ - Host Name
+ required: true
+ volume:
+ description:
+ - Volume name to map to the host
+ required: true
+ state:
+ description:
+ - Creates mapping when present or removes when absent, or provides
+ details of a mapping when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Map a volume to an existing host
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: present # Default
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Unmap volume bar from host foo.example.com
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: absent
+ system: ibox01
+ user: admin
+ password: secret
+
+- name: Stat mapping of volume bar to host foo.example.com
+ infini_map:
+ host: foo.example.com
+ volume: bar
+ state: stat
+ system: ibox01
+ user: admin
+ password: secret
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_pool, get_system, get_volume, get_host, merge_two_dicts
+
+
+def vol_is_mapped_to_host(volume, host):
+ volume_fields = volume.get_fields()
+ volume_id = volume_fields.get('id')
+ host_luns = host.get_luns()
+ #print('volume id: {0}'.format(volume_id))
+ #print('host luns: {0}'.format(str(host_luns)))
+ for lun in host_luns:
+ if lun.volume == volume:
+ #print('found mapped volume: {0}'.format(volume))
+ return True
+ return False
+
+
+def find_lun_use(module, host, volume):
+ check_result = {'lun_used': False, 'lun_volume_matches': False}
+ desired_lun = module.params['lun']
+
+ if desired_lun:
+ for host_lun in host.get_luns():
+ if desired_lun == host_lun.lun:
+ if host_lun.volume == volume:
+ check_result = {'lun_used': True, 'lun_volume_matches': True}
+ else:
+ check_result = {'lun_used': True, 'lun_volume_matches': False}
+
+ return check_result
+
+
+def find_lun(host, volume):
+ found_lun = None
+ luns = host.get_luns()
+
+ for lun in luns:
+ if lun.volume == volume:
+ found_lun = lun.lun
+ return found_lun
+
+
+@api_wrapper
+def create_mapping(module, system):
+ """
+ Create mapping of volume to host. If already mapped, exit_json with changed False.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params['host'])
+ volume = get_volume(module, system)
+
+ lun_use = find_lun_use(module, host, volume)
+ if lun_use['lun_used']:
+ #assert not lun_use['lun_volume_matches'], "Cannot have matching lun and volume in create_mapping()"
+ msg = "Cannot create mapping of volume '{}' to host '{}' using lun '{}'. Lun in use.".format(
+ volume.get_name(),
+ host.get_name(),
+ module.params['lun'])
+ module.fail_json(msg=msg)
+
+ try:
+ desired_lun = module.params['lun']
+ if not module.check_mode:
+ host.map_volume(volume, lun=desired_lun)
+ changed = True
+ except APICommandFailed as err:
+ if "is already mapped" not in str(err):
+ module.fail_json('Cannot map volume {0} to host {1}: {2}'.format(
+ module.params['volume'],
+ module.params['host'],
+ str(err)))
+
+ return changed
+
+
+@api_wrapper
+def update_mapping(module, system):
+ host = system.hosts.get(name=module.params['host'])
+ volume = get_volume(module, system)
+ desired_lun = module.params['lun']
+
+ assert vol_is_mapped_to_host(volume, host)
+
+ if desired_lun:
+ found_lun = find_lun(host, volume)
+ if found_lun != desired_lun:
+ msg = "Cannot change the lun from '{}' to '{}' for existing mapping of volume '{}' to host '{}'".format(
+ found_lun,
+ desired_lun,
+ volume.get_name(),
+ host.get_name())
+ module.fail_json(msg=msg)
+
+ changed = False
+ return changed
+
+
+@api_wrapper
+def delete_mapping(module, system):
+ """
+ Remove mapping of volume from host. If the either the volume or host
+ do not exist, then there should be no mapping to unmap. If unmapping
+ generates a key error with 'has no logical units' in its message, then
+ the volume is not mapped. Either case, return changed=False.
+ """
+ changed = False
+ msg = ""
+
+ if not module.check_mode:
+ volume = get_volume(module, system)
+ host = system.hosts.get(name=module.params['host'])
+
+ if volume and host:
+ try:
+ existing_lun = find_lun(host, volume)
+ host.unmap_volume(volume)
+ changed = True
+ msg = "Volume '{0}' was unmapped from host '{1}' freeing lun '{2}'".format(
+ module.params['volume'],
+ module.params['host'],
+ existing_lun,
+ )
+ except KeyError as err:
+ if 'has no logical units' not in str(err):
+ module.fail_json('Cannot unmap volume {0} from host {1}: {2}'.format(
+ module.params['volume'],
+ module.params['host'],
+ str(err)))
+ else:
+ msg = "Volume {0} was not mapped to host {1} and so unmapping was not executed".format(
+ module.params['volume'],
+ module.params['host'],
+ )
+ else:
+ msg = "Either volume {0} or host {1} does not exist. Unmapping was not executed".format(
+ module.params['volume'],
+ module.params['host'],
+ )
+ else: # check_mode
+ changed = True
+
+ module.exit_json(msg=msg, changed=changed)
+
+
+def get_sys_vol_host(module):
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ return (system, volume, host)
+
+
+def get_mapping_fields(volume, host):
+ luns = host.get_luns()
+ for lun in luns:
+ if volume.get_name() == lun.volume.get_name():
+ field_dict = dict(
+ id=lun.id,
+ )
+ return field_dict
+ assert False, 'Failed to find lun details from volume {0} and host {1}'.format(
+ volume.get_name(), host.get_name())
+
+
+def handle_stat(module):
+ system, volume, host = get_sys_vol_host(module)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ if not volume:
+ module.fail_json(msg='Volume {0} not found'.format(volume_name))
+ if not host:
+ module.fail_json(msg='Host {0} not found'.format(host_name))
+ if not vol_is_mapped_to_host(volume, host):
+ msg = 'Volume {0} is not mapped to host {1}'.format(volume_name, host_name)
+ module.fail_json(msg=msg)
+
+ found_lun = find_lun(host, volume)
+
+ field_dict = get_mapping_fields(volume, host)
+ result = dict(
+ changed=False,
+ volume_lun=found_lun,
+ msg='Volume {0} is mapped to host {1}'.format(volume_name, host_name),
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, volume, host = get_sys_vol_host(module)
+ if not volume:
+ module.fail_json(changed=False, msg='Volume {0} not found'.format(
+ module.params['volume']))
+ if not host:
+ module.fail_json(changed=False, msg='Host {0} not found'.format(
+ module.params['host']))
+ if not vol_is_mapped_to_host(volume, host):
+ changed = create_mapping(module, system)
+ # TODO: Why is find_lun() returning None after creating the mapping?
+ # host.get_luns() returns an empty list, why?
+ # existing_lun = find_lun(host, volume)
+ # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
+ # volume.get_name(),
+ # host.get_name(),
+ # existing_lun,
+ # )
+ msg = "Volume '{0}' map to host '{1}' created".format(
+ volume.get_name(),
+ host.get_name()
+ )
+ else:
+ changed = update_mapping(module, system)
+ existing_lun = find_lun(host, volume)
+ msg = "Volume '{0}' map to host '{1}' already exists using lun '{2}'".format(
+ volume.get_name(),
+ host.get_name(),
+ existing_lun,
+ )
+
+ result = dict(
+ changed=changed,
+ msg=msg,
+ )
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ system, volume, host = get_sys_vol_host(module)
+ if not volume or not host:
+ module.exit_json(changed=False, msg='Mapping of volume {0} to host {1} already absent'.format(
+ module.params['volume'],
+ module.params['host']))
+ else:
+ changed = delete_mapping(module, system)
+ module.exit_json(changed=changed, msg="File system removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ """
+ Gather arguments and manage the mapping of volumes to hosts.
+ """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ host=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ volume=dict(required=True),
+ lun=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
new file mode 100644
index 00000000..06889fcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_pool
+version_added: 2.3
+short_description: Create, Delete and Modify Pools on Infinibox
+description:
+ - This module creates, deletes or modifies pools on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ name:
+ description:
+ - Pool Name
+ required: true
+ state:
+ description:
+ - Creates/Modifies Pool when present, removes when absent, or provides
+ details of a pool when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ size:
+ description:
+ - Pool Physical Capacity in MB, GB or TB units.
+ If pool size is not set on pool creation, size will be equal to 1TB.
+ See examples.
+ required: false
+ vsize:
+ description:
+ - Pool Virtual Capacity in MB, GB or TB units.
+ If pool vsize is not set on pool creation, Virtual Capacity will be equal to Physical Capacity.
+ See examples.
+ required: false
+ ssd_cache:
+ description:
+ - Enable/Disable SSD Cache on Pool
+ required: false
+ default: yes
+ type: bool
+ compression:
+ description:
+ - Enable/Disable Compression on Pool
+ required: false
+ default: yes
+ type: bool
+ version_added: 2.8
+
+notes:
+ - Infinibox Admin level access is required for pool modifications
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Make sure pool foo exists. Set pool physical capacity to 10TB
+ infini_pool:
+ name: foo
+ size: 10TB
+ vsize: 10TB
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Disable SSD Cache on pool
+ infini_pool:
+ name: foo
+ ssd_cache: no
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Disable Compression on pool
+ infini_pool:
+ name: foo
+ compression: no
+ user: admin
+ password: secret
+ system: ibox001
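+
+# An illustrative sketch: read pool details back with state stat. The pool
+# and system names are placeholders; the registered result includes the
+# pool id and free_physical_capacity.
+- name: Stat pool foo
+ infini_pool:
+ name: foo
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+ register: pool_stat
+
+- name: Show free physical capacity of pool foo
+ debug:
+ var: pool_stat.free_physical_capacity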
+'''
+
+# RETURN = r''' # '''
+
+import traceback
+
+CAPACITY_IMP_ERR = None
+try:
+ from capacity import KiB, Capacity
+ HAS_CAPACITY = True
+except ImportError:
+ CAPACITY_IMP_ERR = traceback.format_exc()
+ HAS_CAPACITY = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_pool, get_system
+
+
+@api_wrapper
+def create_pool(module, system):
+ """Create Pool"""
+ name = module.params['name']
+ size = module.params['size']
+ vsize = module.params['vsize']
+ ssd_cache = module.params['ssd_cache']
+ compression = module.params['compression']
+
+ if not module.check_mode:
+ if not size and not vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity('1TB'))
+ elif size and not vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(size))
+ elif not size and vsize:
+ pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity(vsize))
+ else:
+ pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(vsize))
+ # Default value of ssd_cache is True. Disable ssd caching if False
+ if not ssd_cache:
+ pool.update_ssd_enabled(ssd_cache)
+ # Default value of compression is True. Disable compression if False
+ if not compression:
+ pool.update_compression_enabled(compression)
+
+ module.exit_json(changed=True, msg='Pool created')
+
+
+@api_wrapper
+def update_pool(module, system, pool):
+ """Update Pool"""
+ changed = False
+
+ size = module.params['size']
+ vsize = module.params['vsize']
+ #ssd_cache = module.params['ssd_cache']
+ compression = module.params['compression']
+
+ # Roundup the capacity to mimic Infinibox behaviour
+ if size:
+ physical_capacity = Capacity(size).roundup(6 * 64 * KiB)
+ if pool.get_physical_capacity() != physical_capacity:
+ if not module.check_mode:
+ pool.update_physical_capacity(physical_capacity)
+ changed = True
+
+ if vsize:
+ virtual_capacity = Capacity(vsize).roundup(6 * 64 * KiB)
+ if pool.get_virtual_capacity() != virtual_capacity:
+ if not module.check_mode:
+ pool.update_virtual_capacity(virtual_capacity)
+ changed = True
+
+ #if pool.is_ssd_enabled() != ssd_cache:
+ # if not module.check_mode:
+ # pool.update_ssd_enabled(ssd_cache)
+ # changed = True
+
+ if pool.is_compression_enabled() != compression:
+ if not module.check_mode:
+ pool.update_compression_enabled(compression)
+ changed = True
+
+ if changed:
+ msg = 'Pool updated'
+ else:
+ msg = 'Pool did not require updating'
+ module.exit_json(changed=changed, msg=msg)
+
+
+@api_wrapper
+def delete_pool(module, pool):
+ """Delete Pool"""
+ if not module.check_mode:
+ pool.delete()
+ msg = 'Pool deleted'
+ module.exit_json(changed=True, msg=msg)
+
+
+def get_sys_pool(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ return (system, pool)
+
+
+def handle_stat(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ module.fail_json(msg='Pool {0} not found'.format(module.params['name']))
+ fields = pool.get_fields() #from_cache=True, raw_value=True)
+ # print('fields: {0}'.format(fields))
+ free_physical_capacity = fields.get('free_physical_capacity', None)
+ pool_id = fields.get('id', None)
+
+ result = dict(
+ changed=False,
+ free_physical_capacity=str(free_physical_capacity),
+ id=pool_id,
+ msg='Pool stat found'
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ create_pool(module, system)
+ module.exit_json(changed=True, msg="Pool created")
+ else:
+ changed = update_pool(module, system, pool)
+ module.exit_json(changed=changed, msg="Pool updated")
+
+
+def handle_absent(module):
+ system, pool = get_sys_pool(module)
+ if not pool:
+ module.exit_json(changed=False, msg="Pool already absent")
+ else:
+ delete_pool(module, pool)
+ module.exit_json(changed=True, msg="Pool removed")
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ size=dict(),
+ vsize=dict(),
+ ssd_cache=dict(type='bool', default=True),
+ compression=dict(type='bool', default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
+
+ if module.params['size']:
+ try:
+ Capacity(module.params['size'])
+ except Exception:
+ module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+ if module.params['vsize']:
+ try:
+ Capacity(module.params['vsize'])
+ except Exception:
+ module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
+
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
new file mode 100644
index 00000000..2d55ae6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_port
+version_added: '2.10'
+short_description: Add and delete Fibre Channel and iSCSI ports on a host on Infinibox
+description:
+ - This module adds or deletes Fibre Channel and iSCSI ports on hosts on
+ Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ host:
+ description:
+ - Host name
+ required: true
+ state:
+ description:
+ - Creates mapping when present, removes when absent, or provides
+ details of a mapping when stat.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ wwns:
+ description:
+ - List of wwns of the host
+ required: false
+ default: []
+ iqns:
+ description:
+ - List of iqns of the host
+ required: false
+ default: []
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Make sure host bar is available with wwn/iqn ports
+ infini_port:
+ host: bar.example.com
+ state: present
+ wwns:
+ - "00:00:00:00:00:00:00:00"
+ - "11:11:11:11:11:11:11:11"
+ iqns:
+ - "iqn.yyyy-mm.reverse-domain:unique-string"
+ system: ibox01
+ user: admin
+ password: secret
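+
+# Illustrative sketches for the other supported states; host, port and
+# system values are placeholders.
+- name: Remove ports from host bar.example.com
+ infini_port:
+ host: bar.example.com
+ state: absent
+ wwns:
+ - "00:00:00:00:00:00:00:00"
+ iqns:
+ - "iqn.yyyy-mm.reverse-domain:unique-string"
+ system: ibox01
+ user: admin
+ password: secret
+
+- name: Stat ports on host bar.example.com
+ infini_port:
+ host: bar.example.com
+ state: stat
+ system: ibox01
+ user: admin
+ password: secret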
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_host, merge_two_dicts
+from infi.dtypes.wwn import WWN
+from infi.dtypes.iqn import make_iscsi_name
+#from infinisdk.core.exceptions import APICommandFailed
+
+@api_wrapper
+def update_ports(module, system):
+ """
+ Add the given wwn and iqn ports to the host. Return True if any port was added.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params['host'])
+
+ for wwn_port in module.params['wwns']:
+ wwn = WWN(wwn_port)
+ if not system.hosts.get_host_by_initiator_address(wwn) == host:
+ if not module.check_mode:
+ host.add_port(wwn)
+ changed = True
+
+ for iscsi_port in module.params['iqns']:
+ iscsi_name = make_iscsi_name(iscsi_port)
+ if not system.hosts.get_host_by_initiator_address(iscsi_name) == host:
+ if not module.check_mode:
+ host.add_port(iscsi_name)
+ changed = True
+
+ return changed
+
+
+@api_wrapper
+def delete_ports(module, system):
+ """
+ Remove ports from host.
+ """
+ changed = False
+
+ host = system.hosts.get(name=module.params['host'])
+ for wwn_port in module.params['wwns']:
+ wwn = WWN(wwn_port)
+ if system.hosts.get_host_by_initiator_address(wwn) == host:
+ if not module.check_mode:
+ host.remove_port(wwn)
+ changed = True
+ for iscsi_port in module.params['iqns']:
+ iscsi_name = make_iscsi_name(iscsi_port)
+ if system.hosts.get_host_by_initiator_address(iscsi_name) == host:
+ if not module.check_mode:
+ host.remove_port(iscsi_name)
+ changed = True
+ return changed
+
+
+def get_sys_host(module):
+ system = get_system(module)
+ host = get_host(module, system)
+ return (system, host)
+
+
+def edit_initiator_keys(host_initiators, include_key_list):
+ """
+ For each host initiator, remove keys not in the include_key_list.
+ For FCs, add a long address. This is the address with colons inserted.
+ Return the edited host initiators list.
+ """
+ trimmed_initiators = []
+ for init in host_initiators:
+ if init['type'] == 'FC' and 'address' in init.keys():
+ # Add address_long key to init whose value is the address with colons inserted.
+ address_str = str(init['address'])
+ address_iter = iter(address_str)
+ long_address = ':'.join(a+b for a, b in zip(address_iter, address_iter))
+ init['address_long'] = long_address
+
+ trimmed_item = {key:val for key, val in init.items() if key in include_key_list}
+ trimmed_initiators.append(trimmed_item)
+ return trimmed_initiators
+
+
+def find_host_initiators_data(module, system, host, initiator_type):
+ """
+ Given a host object, find its initiators that match initiator_type.
+ Only include desired initiator keys for each initiator.
+ Return the filtered and edited host initiator list.
+ """
+ request = 'initiators?page=1&page_size=1000&host_id={0}'.format(host.id)
+ #print("\nrequest:", request, "initiator_type:", initiator_type)
+ get_initiators_result = system.api.get(request, check_version=False)
+ result_code = get_initiators_result.status_code
+ if result_code != 200:
+ msg = 'get initiators REST call failed. code: {0}'.format(result_code)
+ module.fail_json(msg=msg)
+
+ # Only return initiators of the desired type.
+ host_initiators_by_type = [initiator for initiator in get_initiators_result.get_result() \
+ if initiator['type'] == initiator_type]
+
+
+ #print("host_initiators_by_type:", host_initiators_by_type)
+ #print()
+
+ # Only include certain keys in the returned initiators
+ if initiator_type == 'FC':
+ include_key_list = ['address', 'address_long', 'host_id', 'port_key', 'targets', 'type']
+ elif initiator_type == 'ISCSI':
+ include_key_list = ['address', 'host_id', 'port_key', 'targets', 'type']
+ else:
+ msg = 'Cannot search for host initiator types other than FC and ISCSI'
+ module.fail_json(msg=msg)
+ host_initiators_by_type = edit_initiator_keys(host_initiators_by_type, include_key_list)
+
+ return host_initiators_by_type
+
+
+def get_port_fields(module, system, host):
+ """
+ Return a dict with desired fields from FC and ISCSI ports associated with the host.
+ """
+ host_fc_initiators = find_host_initiators_data(module, system, host, initiator_type='FC')
+ host_iscsi_initiators = find_host_initiators_data(module, system, host, initiator_type='ISCSI')
+
+ field_dict = dict(
+ ports=[],
+ )
+
+ connectivity_lut = {
+ 0: "DISCONNECTED",
+ 1: "DEGRADED",
+ 2: "DEGRADED",
+ 3: "CONNECTED"
+ }
+
+ ports = host.get_ports()
+ for port in ports:
+ if str(type(port)) == "<class 'infi.dtypes.wwn.WWN'>":
+ found_initiator = False
+ for initiator in host_fc_initiators:
+ if initiator['address'] == str(port).replace(":", ""):
+ found_initiator = True
+ #print("initiator targets:", initiator['targets'])
+ unique_initiator_target_ids = \
+ {target['node_id'] for target in initiator['targets']}
+ port_dict = {
+ "address": str(port),
+ "address_long": initiator['address_long'],
+ "connectivity": connectivity_lut[len(unique_initiator_target_ids)],
+ "targets": initiator['targets'],
+ "type": initiator['type'],
+ }
+
+ if not found_initiator:
+ address_str = str(port)
+ address_iter = iter(address_str)
+ long_address = ':'.join(a+b for a, b in zip(address_iter, address_iter))
+ port_dict = {
+ "address": str(port),
+ "address_long": long_address,
+ "connectivity": connectivity_lut[0],
+ "targets": [],
+ "type": "FC"
+ }
+
+ field_dict['ports'].append(port_dict)
+
+ if str(type(port)) == "<class 'infi.dtypes.iqn.IQN'>":
+ found_initiator = False
+ for initiator in host_iscsi_initiators:
+ if initiator['address'] == str(port):
+ found_initiator = True
+ #print("initiator targets:", initiator['targets'])
+ unique_initiator_target_ids = \
+ {target['node_id'] for target in initiator['targets']}
+ port_dict = {
+ "address": str(port),
+ "connectivity": connectivity_lut[len(unique_initiator_target_ids)],
+ "targets": initiator['targets'],
+ "type": initiator['type'],
+ }
+
+ if not found_initiator:
+ port_dict = {
+ "address": str(port),
+ "connectivity": connectivity_lut[0],
+ "targets": [],
+ "type": "ISCSI"
+ }
+
+ field_dict['ports'].append(port_dict)
+
+ return field_dict
+
+
+def handle_stat(module):
+ """
+ Handle stat state. Fail if host is None.
+ Return json with status.
+ """
+ system, host = get_sys_host(module)
+
+ host_name = module.params['host']
+ if not host:
+ module.fail_json(msg='Host {0} not found'.format(host_name))
+
+ field_dict = get_port_fields(module, system, host)
+ result = dict(
+ changed=False,
+ msg='Host {0} ports found'.format(host_name),
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """
+ Handle present state. Fail if host is None.
+ """
+ system, host = get_sys_host(module)
+
+ host_name = module.params['host']
+ if not host:
+ module.fail_json(msg='Host {0} not found'.format(host_name))
+
+ changed = update_ports(module, system)
+ if changed:
+ msg = 'Ports updated for host {0}'.format(host.get_name())
+ else:
+ msg = 'No port changes were required for host {0}'.format(host.get_name())
+
+ result = dict(
+ changed=changed,
+ msg=msg,
+ )
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ """
+ Handle absent state. Fail if host is None.
+ """
+ system, host = get_sys_host(module)
+ if not host:
+ module.exit_json(changed=False, msg='Host {0} not found'.format(
+ module.params['host']))
+
+ changed = delete_ports(module, system)
+ if changed:
+ msg = 'Ports removed from host {0}'.format(host.get_name())
+ else:
+ msg = 'No port changes were required. Ports already removed from host {0}'.format(host.get_name())
+
+ result = dict(
+ changed=changed,
+ msg=msg,
+ )
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """
+ Handle states. Always logout.
+ """
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ pass
+
+
+def main():
+ """
+ Gather auguments and manage mapping of vols to hosts.
+ """
+ argument_spec = infinibox_argument_spec()
+ null_list = list()
+ argument_spec.update(
+ dict(
+ host=dict(required=True),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ wwns=dict(type='list', default=list()),
+ iqns=dict(type='list', default=list()),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
new file mode 100644
index 00000000..df5ce5ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_user
+version_added: '2.10'
+short_description: Create, Delete and Modify a User on Infinibox
+description:
+ - This module creates, deletes or modifies a user on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ user_name:
+ description:
+ - The new user's Name. Once a user is created, the user_name may not be
+ changed from this module. It may be changed from the UI or from
+ infinishell.
+ required: true
+ user_email:
+ description:
+ - The new user's Email address
+ required: true
+ user_password:
+ description:
+ - The new user's password
+ required: true
+ user_role:
+ description:
+ - The user's role
+ required: true
+ choices: [ "admin", "pool_admin", "read_only" ]
+ user_enabled:
+ description:
+ - Specify whether to enable the user
+ type: bool
+ required: false
+ default: true
+ user_pool:
+ description:
+ - Use with role==pool_admin. Specify the new user's pool.
+ required: False
+ state:
+ description:
+ - Creates/Modifies user when present or removes when absent
+ required: false
+ default: present
+ choices: [ "stat", "reset_password", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+'''
+
+EXAMPLES = r'''
+- name: Create new user
+ infini_user:
+ user_name: foo_user
+ user_email: foo@example.com
+ user_password: secret2
+ user_role: pool_admin
+ user_enabled: false
+ user_pool: foo_pool
+ state: present
+ user: admin
+ password: secret1
+ system: ibox001
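+
+# Illustrative sketches for the other supported states; user names and
+# passwords are placeholders.
+- name: Reset a user's password
+ infini_user:
+ user_name: foo_user
+ user_password: new_secret2
+ state: reset_password
+ user: admin
+ password: secret1
+ system: ibox001
+
+- name: Remove a user
+ infini_user:
+ user_name: foo_user
+ state: absent
+ user: admin
+ password: secret1
+ system: ibox001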
+'''
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \
+ get_system, get_user, get_pool, unixMillisecondsToDate, merge_two_dicts
+
+
+@api_wrapper
+def create_user(module, system):
+ if not module.check_mode:
+ user = system.users.create(name=module.params['user_name'],
+ password=module.params['user_password'],
+ email=module.params['user_email'],
+ enabled=module.params['user_enabled'],
+ )
+ # Set the user's role
+ user.update_role(module.params['user_role'])
+ if module.params['user_pool']:
+ assert module.params['user_role'] == 'pool_admin', \
+ 'user_pool set, but role is not pool_admin'
+ # Add the user to the pool's owners
+ pool = system.pools.get(name=module.params['user_pool'])
+ add_user_to_pool_owners(user, pool)
+ changed = True
+ return changed
+
+
+def add_user_to_pool_owners(user, pool):
+ """
+ Find the current list of pool owners and add user using pool.set_owners().
+ set_owners() replaces the current owners with the list of new owners. So,
+ get owners, add user, then set owners. Further, we need to know if the
+ owners changed. Use sets of owners to compare.
+ """
+ #print("add_user_to_pool_owners(): start")
+ changed = False
+ pool_fields = pool.get_fields(from_cache=True, raw_value=True)
+ pool_owners = pool_fields.get('owners', [])
+ #print('pool_owners:', pool_owners, 'pool_owners type:', type(pool_owners))
+ #print('user:', user)
+ #print('pool:', pool)
+ pool_owners_set = set(pool_owners)
+ #print('pool_owners_set:', pool_owners_set)
+ new_pool_owners_set = pool_owners_set.copy()
+ new_pool_owners_set.add(user.id)
+ #print('new_pool_owners_set:', new_pool_owners_set)
+ if pool_owners_set != new_pool_owners_set:
+ pool.set_owners([user]) #(pool_owners.append(user))
+ changed = True
+ #print("changed:", changed)
+ #print("add_user_to_pool_owners(): end")
+ return changed
+
+
+def remove_user_from_pool_owners(user, pool):
+ changed = False
+ pool_fields = pool.get_fields(from_cache=True, raw_value=True)
+ pool_owners = pool_fields.get('owners', [])
+ try:
+ pool_owners.remove(user)
+ pool.set_owners(pool_owners)
+ changed = True
+ except ValueError:
+ pass # User is not a pool owner
+ return changed
+
+
+@api_wrapper
+def update_user(module, system, user):
+ #print("update_user()")
+ assert user is not None, "Cannot update user. User not found."
+ changed = False
+ fields = user.get_fields(from_cache=True, raw_value=True)
+ if fields.get('role') != module.params['user_role'].upper():
+ user.update_field('role', module.params['user_role'])
+ changed = True
+ if fields.get('enabled') != module.params['user_enabled']:
+ user.update_field('enabled', module.params['user_enabled'])
+ changed = True
+ if fields.get('email') != module.params['user_email']:
+ user.update_field('email', module.params['user_email'])
+ changed = True
+
+ if module.params['user_pool']:
+ try:
+ pool_name = module.params['user_pool']
+ pool = system.pools.get(name=pool_name)
+ except Exception as err:
+ module.fail_json(msg='Cannot find pool {0}: {1}'.format(pool_name, err))
+ if add_user_to_pool_owners(user, pool):
+ changed = True
+ return changed
+
+
+@api_wrapper
+def reset_user_password(module, system, user):
+ #print("update_user()")
+ assert user is not None, "Cannot change user password. User not found."
+ user.update_password(module.params['user_password'])
+
+
+@api_wrapper
+def delete_user(module, user):
+ if not user:
+ return False
+
+ changed = True
+ if not module.check_mode:
+ # May raise APICommandFailed if mapped, etc.
+ user.delete()
+ return changed
+
+
+def get_sys_user(module):
+ system = get_system(module)
+ user = get_user(module, system)
+ #print("get_sys_user(): user:", user)
+ return (system, user)
+
+
+def get_user_fields(user):
+ pools = user.get_owned_pools()
+ pool_names = [pool.get_field('name') for pool in pools]
+
+ fields = user.get_fields(from_cache=True, raw_value=True)
+ field_dict = dict(
+ id=user.id,
+ enabled=fields.get('enabled', None),
+ role=fields.get('role', None),
+ email=fields.get('email', None),
+ pools=pool_names,
+ )
+ return field_dict
+
+
+def handle_stat(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ module.fail_json(msg='User {0} not found'.format(user_name))
+ field_dict = get_user_fields(user)
+ result = dict(
+ changed=False,
+ msg='User stat found'
+ )
+ result = merge_two_dicts(result, field_dict)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = create_user(module, system)
+ msg = 'User {0} created'.format(user_name)
+ else:
+ changed = update_user(module, system, user)
+ if changed:
+ msg = 'User {0} updated'.format(user_name)
+ else:
+ msg = 'User {0} update required no changes'.format(user_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = False
+ msg="User {0} already absent".format(user_name)
+ else:
+ changed = delete_user(module, user)
+ msg="User {0} removed".format(user_name)
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_reset_password(module):
+ system, user = get_sys_user(module)
+ user_name = module.params["user_name"]
+ if not user:
+ msg = 'Cannot change password. User {0} not found'.format(user_name)
+ module.fail_json(msg=msg)
+ else:
+ reset_user_password(module, system, user)
+ msg = 'User {0} password changed'.format(user_name)
+ module.exit_json(changed=True, msg=msg)
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ elif state == 'reset_password':
+ handle_reset_password(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ state = module.params['state']
+ user_role = module.params['user_role']
+ user_pool = module.params['user_pool']
+ if state == 'present':
+ if user_role == 'pool_admin' and not user_pool:
+ module.fail_json(msg='user_role "pool_admin" requires a user_pool to be provided')
+ if user_role != 'pool_admin' and user_pool:
+ module.fail_json(msg='Only user_role "pool_admin" should have a user_pool provided')
+
+ valid_keys = ['user_email', 'user_password', 'user_role', 'user_enabled']
+ for valid_key in valid_keys:
+ if module.params[valid_key] is None:
+ msg = 'For state "present", options {0} are required'.format(", ".join(valid_keys))
+ module.fail_json(msg=msg)
+ elif state == 'reset_password':
+ if not module.params['user_password']:
+ msg = 'For state "reset_password", user_password is required'
+ module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ user_name=dict(required=True),
+ user_email=dict(required=False),
+ user_password=dict(required=False, no_log=True),
+ user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only']),
+ user_enabled=dict(required=False, type='bool', default=True),
+ user_pool=dict(required=False),
+ state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
new file mode 100644
index 00000000..1689316f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: infini_vol
+version_added: 2.3
+short_description: Create, Delete or Modify volumes on Infinibox
+description:
+ - This module creates, deletes or modifies a volume on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+ name:
+ description:
+ - Volume Name
+ required: true
+ state:
+ description:
+ - Creates/Modifies master volume or snapshot when present or removes when absent.
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ size:
+ description:
+ - Volume size in MB, GB or TB units. Required for creating a master volume, but not a snapshot
+ required: false
+ snapshot_lock_expires_at:
+ description:
+ - This will cause a snapshot to be locked at the specified date-time.
+ Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ required: false
+ snapshot_lock_only:
+ description:
+ - This will lock an existing snapshot but will suppress refreshing the snapshot.
+ type: bool
+ required: false
+ default: false
+ thin_provision:
+ description:
+ - Whether the master volume should be thin provisioned. Required for creating a master volume, but not a snapshot.
+ type: bool
+ required: false
+ default: true
+ version_added: '2.8'
+ pool:
+ description:
+ - Pool that master volume will reside within. Required for creating a master volume, but not a snapshot.
+ required: false
+ volume_type:
+ description:
+ - Specifies the volume type, regular volume or snapshot.
+ required: false
+ default: master
+ choices: [ "master", "snapshot" ]
+ parent_volume_name:
+ description:
+ - Specify a volume name. This is the volume parent for creating a snapshot. Required if volume_type is snapshot.
+ required: false
+extends_documentation_fragment:
+ - infinibox
+requirements:
+ - capacity
+'''
+
+EXAMPLES = r'''
+- name: Create new volume named foo under pool named bar
+ infini_vol:
+ name: foo
+ # volume_type: master # Default
+ size: 1TB
+ thin_provision: yes
+ pool: bar
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Create snapshot named foo_snap from volume named foo
+ infini_vol:
+ name: foo_snap
+ volume_type: snapshot
+ parent_volume_name: foo
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Stat snapshot, also a volume, named foo_snap
+ infini_vol:
+ name: foo_snap
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Remove snapshot, also a volume, named foo_snap
+ infini_vol:
+ name: foo_snap
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
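+
+# An illustrative sketch: lock an existing snapshot until the given
+# date-time without refreshing it. The date-time below is a placeholder;
+# locks may be at most 30 days in the future.
+- name: Lock snapshot foo_snap without refreshing it
+ infini_vol:
+ name: foo_snap
+ volume_type: snapshot
+ parent_volume_name: foo
+ snapshot_lock_only: true
+ snapshot_lock_expires_at: "2020-03-01 00:00:00"
+ state: present
+ user: admin
+ password: secret
+ system: ibox001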
+'''
+
+# RETURN = r''' # '''
+
+try:
+ from capacity import KiB, Capacity
+ HAS_CAPACITY = True
+except ImportError:
+ HAS_CAPACITY = False
+
+import arrow
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.infinibox import \
+ HAS_INFINISDK, api_wrapper, infinibox_argument_spec, ObjectNotFound, \
+ get_pool, get_system, get_volume
+
+
+@api_wrapper
+def create_volume(module, system):
+ """Create Volume"""
+ if not module.check_mode:
+ if module.params['thin_provision']:
+ prov_type = 'THIN'
+ else:
+ prov_type = 'THICK'
+ pool = get_pool(module, system)
+ volume = system.volumes.create(name=module.params['name'], provtype=prov_type, pool=pool)
+
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ volume.update_size(size)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_volume(module, volume):
+ """Update Volume"""
+ changed = False
+ if module.params['size']:
+ size = Capacity(module.params['size']).roundup(64 * KiB)
+ if volume.get_size() != size:
+ if not module.check_mode:
+ volume.update_size(size)
+ changed = True
+ if module.params['thin_provision'] is not None:
+ provisioning = str(volume.get_provisioning())
+ if provisioning == 'THICK' and module.params['thin_provision']:
+ if not module.check_mode:
+ volume.update_provisioning('THIN')
+ changed = True
+ if provisioning == 'THIN' and not module.params['thin_provision']:
+ if not module.check_mode:
+ volume.update_provisioning('THICK')
+ changed = True
+ return changed
+
+
+@api_wrapper
+def delete_volume(module, volume):
+ """ Delete Volume. Volume could be a snapshot."""
+ if not module.check_mode:
+ volume.delete()
+ changed = True
+ return True
+
+
+@api_wrapper
+def create_snapshot(module, system):
+ """Create Snapshot from parent volume"""
+ snapshot_name = module.params['name']
+ parent_volume_name = module.params['parent_volume_name']
+ try:
+ parent_volume = system.volumes.get(name=parent_volume_name)
+ except ObjectNotFound as err:
+ msg = 'Cannot create snapshot {}. Parent volume {} not found'.format(
+ snapshot_name,
+ parent_volume_name)
+ module.fail_json(msg=msg)
+ if not parent_volume:
+ msg = "Cannot find new snapshot's parent volume named {}".format(parent_volume_name)
+ module.fail_json(msg=msg)
+ if not module.check_mode:
+ if module.params['snapshot_lock_only']:
+ msg = "Snapshot does not exist. Cannot comply with 'snapshot_lock_only: true'."
+ module.fail_json(msg=msg)
+ check_snapshot_lock_options(module)
+ snapshot = parent_volume.create_snapshot(name=snapshot_name)
+ manage_snapshot_locks(module, snapshot)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_snapshot(module, snapshot):
+ """
+ Update/refresh snapshot. May also lock it.
+ """
+ refresh_changed = False
+ if not module.params['snapshot_lock_only']:
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ if not snap_is_locked:
+ if not module.check_mode:
+ snapshot.refresh_snapshot()
+ refresh_changed = True
+ else:
+ msg = "Snapshot is locked and may not be refreshed"
+ module.fail_json(msg=msg)
+
+ check_snapshot_lock_options(module)
+ lock_changed = manage_snapshot_locks(module, snapshot)
+
+ return refresh_changed or lock_changed
+
+
+def get_sys_pool_vol_parname(module):
+ system = get_system(module)
+ pool = get_pool(module, system)
+ volume = get_volume(module, system)
+ parname = module.params['parent_volume_name']
+ return (system, pool, volume, parname)
+
+
+def check_snapshot_lock_options(module):
+ """
+ Check if specified options are feasible for a snapshot.
+
+ Prevent very long lock times.
+ max_delta_minutes limits locks to 30 days (43200 minutes).
+
+ This functionality is broken out from manage_snapshot_locks() to allow
+ it to be called by create_snapshot() before the snapshot is actually
+ created.
+ """
+ snapshot_lock_expires_at = module.params['snapshot_lock_expires_at']
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+
+ # Check for lock in the past
+ now = arrow.utcnow()
+ if lock_expires_at <= now:
+ msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
+ msg += "of '{}' from the past".format(snapshot_lock_expires_at)
+ module.fail_json(msg=msg)
+
+ # Check for lock later than max lock, i.e. too far in future.
+ max_delta_minutes = 43200 # 30 days in minutes
+ max_lock_expires_at = now.shift(minutes=max_delta_minutes)
+ if lock_expires_at >= max_lock_expires_at:
+ msg = "snapshot_lock_expires_at exceeds {} days in the future".format(
+ max_delta_minutes//24//60)
+ module.fail_json(msg=msg)
+
+
+def manage_snapshot_locks(module, snapshot):
+ """
+ Manage the locking of a snapshot. Check for bad lock times.
+ See check_snapshot_lock_options() which has additional checks.
+ """
+ name = module.params["name"]
+ snapshot_lock_expires_at = module.params['snapshot_lock_expires_at']
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ current_lock_expires_at = snapshot.get_lock_expires_at()
+ changed = False
+
+ check_snapshot_lock_options(module)
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+ if snap_is_locked and lock_expires_at < current_lock_expires_at:
+ # Lock earlier than current lock
+ msg = "snapshot_lock_expires_at '{}' preceeds the current lock time of '{}'".format(
+ lock_expires_at,
+ current_lock_expires_at)
+ module.fail_json(msg=msg)
+ elif snap_is_locked and lock_expires_at == current_lock_expires_at:
+ # Lock already set to correct time
+ pass
+ else:
+ # Set lock
+ if not module.check_mode:
+ snapshot.update_lock_expires_at(lock_expires_at)
+ changed = True
+ return changed
+
+
+def handle_stat(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ if not volume:
+ msg = "Volume {} not found. Cannot stat.".format(module.params['name'])
+ module.fail_json(msg=msg)
+ fields = volume.get_fields() #from_cache=True, raw_value=True)
+ created_at = str(fields.get('created_at', None))
+ has_children = fields.get('has_children', None)
+ lock_expires_at = str(volume.get_lock_expires_at())
+ lock_state = volume.get_lock_state()
+ mapped = str(fields.get('mapped', None))
+ name = fields.get('name', None)
+ parent_id = fields.get('parent_id', None)
+ size = str(volume.get_size())
+ updated_at = str(fields.get('updated_at', None))
+ used = str(fields.get('used_size', None))
+ volume_id = fields.get('id', None)
+ volume_type = fields.get('type', None)
+ if volume_type == 'SNAPSHOT':
+ msg = 'Snapshot stat found'
+ else:
+ msg = 'Volume stat found'
+
+ result = dict(
+ changed=False,
+ created_at=created_at,
+ has_children=has_children,
+ lock_expires_at=lock_expires_at,
+ lock_state=lock_state,
+ mapped=mapped,
+ msg=msg,
+ parent_id=parent_id,
+ size=size,
+ updated_at=updated_at,
+ used=used,
+ volume_id=volume_id,
+ volume_type=volume_type,
+ )
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume_type = module.params['volume_type']
+ if volume_type == 'master':
+ if not volume:
+ changed = create_volume(module, system)
+ module.exit_json(changed=changed, msg="Volume created")
+ else:
+ changed = update_volume(module, volume)
+ module.exit_json(changed=changed, msg="Volume updated")
+ elif volume_type == 'snapshot':
+ snapshot = volume
+ if not snapshot:
+ changed = create_snapshot(module, system)
+ module.exit_json(changed=changed, msg="Snapshot created")
+ else:
+ changed = update_snapshot(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot updated")
+ else:
+ module.fail_json(msg='A programming error has occurred')
+
+
+def handle_absent(module):
+ system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume_type = module.params['volume_type']
+
+ if volume and volume.get_lock_state() == "LOCKED":
+ msg = "Cannot delete snapshot. Locked."
+ module.fail_json(msg=msg)
+
+ if volume_type == 'master':
+ if not volume:
+ module.exit_json(changed=False, msg="Volume already absent")
+ else:
+ changed = delete_volume(module, volume)
+ module.exit_json(changed=changed, msg="Volume removed")
+ elif volume_type == 'snapshot':
+ if not volume:
+ module.exit_json(changed=False, msg="Snapshot already absent")
+ else:
+ snapshot = volume
+ changed = delete_volume(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot removed")
+ else:
+ module.fail_json(msg='A programming error has occurred')
+
+
+def execute_state(module):
+ state = module.params['state']
+ try:
+ if state == 'stat':
+ handle_stat(module)
+ elif state == 'present':
+ handle_present(module)
+ elif state == 'absent':
+ handle_absent(module)
+ else:
+ module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params['state']
+ size = module.params['size']
+ pool = module.params['pool']
+ volume_type = module.params['volume_type']
+ parent_volume_name = module.params['parent_volume_name']
+ if state == 'present':
+ if volume_type == 'master':
+ if state == 'present':
+ if parent_volume_name:
+ msg = "parent_volume_name should not be specified "
+ msg += "if volume_type is 'volume'. Snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a volume"
+ module.fail_json(msg=msg)
+ elif volume_type == "snapshot":
+ if size or pool:
+ msg = "Neither pool nor size should not be specified "
+ msg += "for volume_type snapshot"
+ module.fail_json(msg=msg)
+ if state == "present":
+ if not parent_volume_name:
+ msg = "For state 'present' and volume_type 'snapshot', "
+ msg += "parent_volume_name is required"
+ module.fail_json(msg=msg)
+ else:
+ msg = "A programming error has occurred"
+ module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ parent_volume_name=dict(required=False),
+ pool=dict(required=False),
+ size=dict(),
+ snapshot_lock_expires_at=dict(),
+ snapshot_lock_only=dict(type='bool', default=False),
+ state=dict(default='present', choices=['stat', 'present', 'absent']),
+ thin_provision=dict(type='bool', default=True),
+ volume_type=dict(default='master', choices=['master', 'snapshot']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib('infinisdk'))
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib('capacity'))
+
+ if module.params['size']:
+ try:
+ Capacity(module.params['size'])
+ except Exception:
+ module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()