Diffstat (limited to 'ansible_collections/community/general/plugins/modules')
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_lvol.py | 19
-rw-r--r--  ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py | 60
-rw-r--r--  ansible_collections/community/general/plugins/modules/apt_rpm.py | 30
-rw-r--r--  ansible_collections/community/general/plugins/modules/btrfs_subvolume.py | 9
-rw-r--r--  ansible_collections/community/general/plugins/modules/cloudflare_dns.py | 8
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_policy.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_role.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_session.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_token.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/cpanm.py | 26
-rw-r--r--  ansible_collections/community/general/plugins/modules/django_command.py | 83
-rw-r--r--  ansible_collections/community/general/plugins/modules/django_manage.py | 92
-rw-r--r--  ansible_collections/community/general/plugins/modules/flowdock.py | 211
-rw-r--r--  ansible_collections/community/general/plugins/modules/gandi_livedns.py | 37
-rw-r--r--  ansible_collections/community/general/plugins/modules/gconftool2.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/gconftool2_info.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_runner.py | 53
-rw-r--r--  ansible_collections/community/general/plugins/modules/homebrew.py | 157
-rw-r--r--  ansible_collections/community/general/plugins/modules/homebrew_cask.py | 70
-rw-r--r--  ansible_collections/community/general/plugins/modules/hponcfg.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/installp.py | 13
-rw-r--r--  ansible_collections/community/general/plugins/modules/irc.py | 31
-rw-r--r--  ansible_collections/community/general/plugins/modules/kernel_blacklist.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_client.py | 5
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_user_federation.py | 3
-rw-r--r--  ansible_collections/community/general/plugins/modules/ldap_search.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/locale_gen.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/lvg.py | 22
-rw-r--r--  ansible_collections/community/general/plugins/modules/lvol.py | 84
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxd_container.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/macports.py | 12
-rw-r--r--  ansible_collections/community/general/plugins/modules/mksysb.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/opkg.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/parted.py | 9
-rw-r--r--  ansible_collections/community/general/plugins/modules/pipx.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/pipx_info.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkg5.py | 15
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkgin.py | 25
-rw-r--r--  ansible_collections/community/general/plugins/modules/portinstall.py | 26
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox.py | 47
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_disk.py | 3
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_domain_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_group_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_kvm.py | 17
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_nic.py | 3
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_node_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_pool.py | 7
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_pool_member.py | 7
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_snap.py | 7
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_storage_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py | 10
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_template.py | 3
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_user_info.py | 4
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_vm_info.py | 10
-rw-r--r--  ansible_collections/community/general/plugins/modules/puppet.py | 19
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax.py | 903
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_cbs.py | 235
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py | 226
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_cdb.py | 266
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_cdb_database.py | 179
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_cdb_user.py | 227
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_clb.py | 320
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_clb_nodes.py | 291
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_clb_ssl.py | 289
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_dns.py | 180
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_dns_record.py | 358
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_facts.py | 152
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_files.py | 400
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_files_objects.py | 556
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_identity.py | 110
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_keypair.py | 179
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_meta.py | 182
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_mon_alarm.py | 235
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_mon_check.py | 329
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_mon_entity.py | 201
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_mon_notification.py | 182
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py | 191
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_network.py | 146
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_queue.py | 147
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_scaling_group.py | 441
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax_scaling_policy.py | 294
-rw-r--r--  ansible_collections/community/general/plugins/modules/redfish_command.py | 17
-rw-r--r--  ansible_collections/community/general/plugins/modules/redfish_config.py | 17
-rw-r--r--  ansible_collections/community/general/plugins/modules/redfish_info.py | 17
-rw-r--r--  ansible_collections/community/general/plugins/modules/redhat_subscription.py | 15
-rw-r--r--  ansible_collections/community/general/plugins/modules/slackpkg.py | 18
-rw-r--r--  ansible_collections/community/general/plugins/modules/snap.py | 5
-rw-r--r--  ansible_collections/community/general/plugins/modules/snap_alias.py | 1
-rw-r--r--  ansible_collections/community/general/plugins/modules/stackdriver.py | 228
-rw-r--r--  ansible_collections/community/general/plugins/modules/svr4pkg.py | 2
-rw-r--r--  ansible_collections/community/general/plugins/modules/swdepot.py | 17
-rw-r--r--  ansible_collections/community/general/plugins/modules/webfaction_app.py | 213
-rw-r--r--  ansible_collections/community/general/plugins/modules/webfaction_db.py | 209
-rw-r--r--  ansible_collections/community/general/plugins/modules/webfaction_domain.py | 184
-rw-r--r--  ansible_collections/community/general/plugins/modules/webfaction_mailbox.py | 152
-rw-r--r--  ansible_collections/community/general/plugins/modules/webfaction_site.py | 223
-rw-r--r--  ansible_collections/community/general/plugins/modules/xfconf.py | 3
-rw-r--r--  ansible_collections/community/general/plugins/modules/xfconf_info.py | 3
100 files changed, 596 insertions, 9227 deletions
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py
index 1e7b42568..7d0fb1ee0 100644
--- a/ansible_collections/community/general/plugins/modules/aix_lvol.py
+++ b/ansible_collections/community/general/plugins/modules/aix_lvol.py
@@ -240,8 +240,6 @@ def main():
state = module.params['state']
pvs = module.params['pvs']
- pv_list = ' '.join(pvs)
-
if policy == 'maximum':
lv_policy = 'x'
else:
@@ -249,16 +247,16 @@ def main():
# Add echo command when running in check-mode
if module.check_mode:
- test_opt = 'echo '
+ test_opt = [module.get_bin_path("echo", required=True)]
else:
- test_opt = ''
+ test_opt = []
# check if system commands are available
lsvg_cmd = module.get_bin_path("lsvg", required=True)
lslv_cmd = module.get_bin_path("lslv", required=True)
# Get information on volume group requested
- rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+ rc, vg_info, err = module.run_command([lsvg_cmd, vg])
if rc != 0:
if state == 'absent':
@@ -273,8 +271,7 @@ def main():
lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
# Get information on logical volume requested
- rc, lv_info, err = module.run_command(
- "%s %s" % (lslv_cmd, lv))
+ rc, lv_info, err = module.run_command([lslv_cmd, lv])
if rc != 0:
if state == 'absent':
@@ -296,7 +293,7 @@ def main():
# create LV
mklv_cmd = module.get_bin_path("mklv", required=True)
- cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, "%sM" % (lv_size, )] + pvs
rc, out, err = module.run_command(cmd)
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s created." % lv)
@@ -306,7 +303,7 @@ def main():
if state == 'absent':
# remove LV
rmlv_cmd = module.get_bin_path("rmlv", required=True)
- rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']])
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
else:
@@ -315,7 +312,7 @@ def main():
if this_lv['policy'] != policy:
# change lv allocation policy
chlv_cmd = module.get_bin_path("chlv", required=True)
- rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']])
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
else:
@@ -331,7 +328,7 @@ def main():
# resize LV based on absolute values
if int(lv_size) > this_lv['size']:
extendlv_cmd = module.get_bin_path("extendlv", required=True)
- cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ cmd = test_opt + [extendlv_cmd, lv, "%sM" % (lv_size - this_lv['size'], )]
rc, out, err = module.run_command(cmd)
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
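Editor's note: the aix_lvol hunks above replace shell-style command strings with argument lists passed to run_command(), and prepend an echo binary in check mode so the real command is only printed. A minimal sketch of that technique under the same assumptions (the helper name and usage line are illustrative, not part of the diff):

    from ansible.module_utils.basic import AnsibleModule

    def run_or_echo(module, argv):
        # In check mode, prepend "echo" so the real command is only printed.
        prefix = [module.get_bin_path("echo", required=True)] if module.check_mode else []
        # Passing a list to run_command bypasses the shell entirely,
        # so arguments containing spaces or quotes need no escaping.
        return module.run_command(prefix + argv)

    # Illustrative usage: rc, out, err = run_or_echo(module, [lsvg_cmd, vg])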
diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
index 3b0a8fd47..d382ed93a 100644
--- a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
+++ b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
@@ -73,16 +73,6 @@ options:
- Using O(force=true) is mandatory when downgrading.
type: bool
default: false
- ack_ansible29:
- description:
- - This option has no longer any effect and will be removed in community.general 9.0.0.
- type: bool
- default: false
- ack_min_ansiblecore211:
- description:
- - This option has no longer any effect and will be removed in community.general 9.0.0.
- type: bool
- default: false
"""
EXAMPLES = """
@@ -181,7 +171,7 @@ RETURN = """
import re
-from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException
@@ -190,7 +180,9 @@ class AnsibleGalaxyInstall(ModuleHelper):
_RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
_RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
_RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
- _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__()
+ _RE_INSTALL_OUTPUT = re.compile(
+ r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
+ )
ansible_version = None
output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
@@ -202,35 +194,24 @@ class AnsibleGalaxyInstall(ModuleHelper):
dest=dict(type='path'),
force=dict(type='bool', default=False),
no_deps=dict(type='bool', default=False),
- ack_ansible29=dict(
- type='bool',
- default=False,
- removed_in_version='9.0.0',
- removed_from_collection='community.general',
- ),
- ack_min_ansiblecore211=dict(
- type='bool',
- default=False,
- removed_in_version='9.0.0',
- removed_from_collection='community.general',
- ),
),
mutually_exclusive=[('name', 'requirements_file')],
required_one_of=[('name', 'requirements_file')],
required_if=[('type', 'both', ['requirements_file'])],
supports_check_mode=False,
)
+ use_old_vardict = False
command = 'ansible-galaxy'
command_args_formats = dict(
- type=fmt.as_func(lambda v: [] if v == 'both' else [v]),
- galaxy_cmd=fmt.as_list(),
- requirements_file=fmt.as_opt_val('-r'),
- dest=fmt.as_opt_val('-p'),
- force=fmt.as_bool("--force"),
- no_deps=fmt.as_bool("--no-deps"),
- version=fmt.as_bool("--version"),
- name=fmt.as_list(),
+ type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
+ galaxy_cmd=cmd_runner_fmt.as_list(),
+ requirements_file=cmd_runner_fmt.as_opt_val('-r'),
+ dest=cmd_runner_fmt.as_opt_val('-p'),
+ force=cmd_runner_fmt.as_bool("--force"),
+ no_deps=cmd_runner_fmt.as_bool("--no-deps"),
+ version=cmd_runner_fmt.as_fixed("--version"),
+ name=cmd_runner_fmt.as_list(),
)
def _make_runner(self, lang):
@@ -254,25 +235,18 @@ class AnsibleGalaxyInstall(ModuleHelper):
try:
runner = self._make_runner("C.UTF-8")
with runner("version", check_rc=False, output_process=process) as ctx:
- return runner, ctx.run(version=True)
- except UnsupportedLocale as e:
+ return runner, ctx.run()
+ except UnsupportedLocale:
runner = self._make_runner("en_US.UTF-8")
with runner("version", check_rc=True, output_process=process) as ctx:
- return runner, ctx.run(version=True)
+ return runner, ctx.run()
def __init_module__(self):
- # self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang)
self.runner, self.ansible_version = self._get_ansible_galaxy_version()
if self.ansible_version < (2, 11):
self.module.fail_json(
- msg="Support for Ansible 2.9 and ansible-base 2.10 has ben removed."
+ msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed."
)
- # Collection install output changed:
- # ansible-base 2.10: "coll.name (x.y.z)"
- # ansible-core 2.11+: "coll.name:x.y.z"
- self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
- r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
- r' was installed successfully$')
self.vars.set("new_collections", {}, change=True)
self.vars.set("new_roles", {}, change=True)
if self.vars.type != "collection":
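Editor's note: the consolidated _RE_INSTALL_OUTPUT pattern above only needs to recognize ansible-core 2.11+ output. A standalone check of that regular expression against the two line shapes it matches (the sample lines are made up for illustration):

    import re

    RE_INSTALL_OUTPUT = re.compile(
        r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'
        r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
    )

    # One collection line and one role line, both hypothetical:
    for line in ("namespace.coll:1.2.3 was installed successfully",
                 "- author.role (4.5.6) was installed successfully"):
        print(RE_INSTALL_OUTPUT.match(line).groupdict())
    # -> {'collection': 'namespace.coll', 'cversion': '1.2.3', 'role': None, 'rversion': None}
    # -> {'collection': None, 'cversion': None, 'role': 'author.role', 'rversion': '4.5.6'}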
diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py
index 03b87e78f..3a0b6d805 100644
--- a/ansible_collections/community/general/plugins/modules/apt_rpm.py
+++ b/ansible_collections/community/general/plugins/modules/apt_rpm.py
@@ -170,7 +170,7 @@ def local_rpm_package_name(path):
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
- rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+ rc, out, err = module.run_command([RPM_PATH, "-q", name])
if rc == 0:
return True
else:
@@ -203,7 +203,7 @@ def query_package_provides(module, name, allow_upgrade=False):
name = local_rpm_package_name(name)
- rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name])
if rc == 0:
if not allow_upgrade:
return True
@@ -253,7 +253,7 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
- rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"})
+ rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"})
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
@@ -271,14 +271,14 @@ def install_packages(module, pkgspec, allow_upgrade=False):
if pkgspec is None:
return (False, "Empty package list")
- packages = ""
+ packages = []
for package in pkgspec:
if not query_package_provides(module, package, allow_upgrade=allow_upgrade):
- packages += "'%s' " % package
+ packages.append(package)
- if len(packages) != 0:
-
- rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"})
+ if packages:
+ command = [APT_PATH, "-y", "install"] + packages
+ rc, out, err = module.run_command(command, environ_update={"LANG": "C"})
installed = True
for package in pkgspec:
@@ -287,7 +287,7 @@ def install_packages(module, pkgspec, allow_upgrade=False):
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
- module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+ module.fail_json(msg="'%s' failed: %s" % (" ".join(command), err))
else:
return (True, "%s present(s)" % packages)
else:
@@ -310,6 +310,18 @@ def main():
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
+ if p['state'] in ['installed', 'present']:
+ module.deprecate(
+ 'state=%s currently behaves unexpectedly by always upgrading to the latest version if'
+ ' the package is already installed. This behavior is deprecated and will change in'
+ ' community.general 11.0.0. You can use state=latest to explicitly request this behavior'
+ ' or state=present_not_latest to explicitly request the behavior that state=%s will have'
+ ' in community.general 11.0.0, namely that the package will not be upgraded if it is'
+ ' already installed.' % (p['state'], p['state']),
+ version='11.0.0',
+ collection_name='community.general',
+ )
+
modified = False
output = ""
diff --git a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
index 864bb65a6..35327bfe0 100644
--- a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
+++ b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
@@ -572,10 +572,7 @@ class BtrfsSubvolumeModule(object):
self.__temporary_mounts[cache_key] = mountpoint
mount = self.module.get_bin_path("mount", required=True)
- command = "%s -o noatime,subvolid=%d %s %s " % (mount,
- subvolid,
- device,
- mountpoint)
+ command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint]
result = self.module.run_command(command, check_rc=True)
return mountpoint
@@ -586,10 +583,10 @@ class BtrfsSubvolumeModule(object):
def __cleanup_mount(self, mountpoint):
umount = self.module.get_bin_path("umount", required=True)
- result = self.module.run_command("%s %s" % (umount, mountpoint))
+ result = self.module.run_command([umount, mountpoint])
if result[0] == 0:
rmdir = self.module.get_bin_path("rmdir", required=True)
- self.module.run_command("%s %s" % (rmdir, mountpoint))
+ self.module.run_command([rmdir, mountpoint])
# Format and return results
def get_results(self):
diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
index d2bea4266..190497644 100644
--- a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
+++ b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
@@ -148,9 +148,9 @@ options:
type:
description:
- The type of DNS record to create. Required if O(state=present).
- - Note that V(SPF) is no longer supported by CloudFlare. Support for it will be removed from community.general 9.0.0.
+ - Support for V(SPF) has been removed in community.general 9.0.0 since that record type is no longer supported by CloudFlare.
type: str
- choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, CAA, TXT ]
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT ]
value:
description:
- The record value.
@@ -674,7 +674,7 @@ class CloudflareAPI(object):
if (params['type'] is None) or (params['record'] is None):
self.module.fail_json(msg="You must provide a type and a record to create a new record")
- if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS']):
if not params['value']:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
@@ -869,7 +869,7 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=30),
ttl=dict(type='int', default=1),
- type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
value=dict(type='str', aliases=['content']),
weight=dict(type='int', default=1),
zone=dict(type='str', required=True, aliases=['domain']),
diff --git a/ansible_collections/community/general/plugins/modules/consul_policy.py b/ansible_collections/community/general/plugins/modules/consul_policy.py
index f020622a0..2ed6021b0 100644
--- a/ansible_collections/community/general/plugins/modules/consul_policy.py
+++ b/ansible_collections/community/general/plugins/modules/consul_policy.py
@@ -33,6 +33,8 @@ attributes:
version_added: 8.3.0
details:
- In check mode the diff will miss operational attributes.
+ action_group:
+ version_added: 8.3.0
options:
state:
description:
diff --git a/ansible_collections/community/general/plugins/modules/consul_role.py b/ansible_collections/community/general/plugins/modules/consul_role.py
index 0da71507a..e07e2036f 100644
--- a/ansible_collections/community/general/plugins/modules/consul_role.py
+++ b/ansible_collections/community/general/plugins/modules/consul_role.py
@@ -32,6 +32,8 @@ attributes:
details:
- In check mode the diff will miss operational attributes.
version_added: 8.3.0
+ action_group:
+ version_added: 8.3.0
options:
name:
description:
diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py
index bd03b561a..87a5f1914 100644
--- a/ansible_collections/community/general/plugins/modules/consul_session.py
+++ b/ansible_collections/community/general/plugins/modules/consul_session.py
@@ -29,6 +29,8 @@ attributes:
support: none
diff_mode:
support: none
+ action_group:
+ version_added: 8.3.0
options:
id:
description:
diff --git a/ansible_collections/community/general/plugins/modules/consul_token.py b/ansible_collections/community/general/plugins/modules/consul_token.py
index eee419863..02bc544da 100644
--- a/ansible_collections/community/general/plugins/modules/consul_token.py
+++ b/ansible_collections/community/general/plugins/modules/consul_token.py
@@ -31,6 +31,8 @@ attributes:
support: partial
details:
- In check mode the diff will miss operational attributes.
+ action_group:
+ version_added: 8.3.0
options:
state:
description:
diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py
index 20ac3e714..3beae895d 100644
--- a/ansible_collections/community/general/plugins/modules/cpanm.py
+++ b/ansible_collections/community/general/plugins/modules/cpanm.py
@@ -68,9 +68,10 @@ options:
mode:
description:
- Controls the module behavior. See notes below for more details.
- - Default is V(compatibility) but that behavior is deprecated and will be changed to V(new) in community.general 9.0.0.
+ - The default changed from V(compatibility) to V(new) in community.general 9.0.0.
type: str
choices: [compatibility, new]
+ default: new
version_added: 3.0.0
name_check:
description:
@@ -80,12 +81,16 @@ options:
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
- "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)."
- - "O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. This is the default mode.
+ - >
+ O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility.
+ This was the default mode before community.general 9.0.0.
O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version)
when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL.
- C(cpanm) version specifiers do not work in this mode."
- - "O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file,
- a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized."
+ C(cpanm) version specifiers do not work in this mode.
+ - >
+ O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file,
+ a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized.
+ This is the default mode from community.general 9.0.0 onwards.
author:
- "Franck Cuny (@fcuny)"
- "Alexei Znamensky (@russoz)"
@@ -150,7 +155,7 @@ class CPANMinus(ModuleHelper):
mirror_only=dict(type='bool', default=False),
installdeps=dict(type='bool', default=False),
executable=dict(type='path'),
- mode=dict(type='str', choices=['compatibility', 'new']),
+ mode=dict(type='str', default='new', choices=['compatibility', 'new']),
name_check=dict(type='str')
),
required_one_of=[('name', 'from_path')],
@@ -165,17 +170,10 @@ class CPANMinus(ModuleHelper):
installdeps=cmd_runner_fmt.as_bool("--installdeps"),
pkg_spec=cmd_runner_fmt.as_list(),
)
+ use_old_vardict = False
def __init_module__(self):
v = self.vars
- if v.mode is None:
- self.deprecate(
- "The default value 'compatibility' for parameter 'mode' is being deprecated "
- "and it will be replaced by 'new'",
- version="9.0.0",
- collection_name="community.general"
- )
- v.mode = "compatibility"
if v.mode == "compatibility":
if v.name_check:
self.do_raise("Parameter name_check can only be used with mode=new")
diff --git a/ansible_collections/community/general/plugins/modules/django_command.py b/ansible_collections/community/general/plugins/modules/django_command.py
new file mode 100644
index 000000000..788f4a100
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/django_command.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: django_command
+author:
+ - Alexei Znamensky (@russoz)
+short_description: Run Django admin commands
+version_added: 9.0.0
+description:
+ - This module allows the execution of arbitrary Django admin commands.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.django
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ command:
+ description:
+ - Django admin command. It must be a valid command accepted by C(python -m django) at the target system.
+ type: str
+ required: true
+ extra_args:
+ type: list
+ elements: str
+ description:
+ - List of extra arguments passed to the django admin command.
+"""
+
+EXAMPLES = """
+- name: Check the project
+ community.general.django_command:
+ command: check
+ settings: myproject.settings
+
+- name: Check the project in specified python path, using virtual environment
+ community.general.django_command:
+ command: check
+ settings: fancysite.settings
+ pythonpath: /home/joedoe/project/fancysite
+ venv: /home/joedoe/project/fancysite/venv
+"""
+
+RETURN = """
+run_info:
+ description: Command-line execution information.
+ type: dict
+ returned: success and O(verbosity) >= 3
+"""
+
+from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+
+class DjangoCommand(DjangoModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ command=dict(type="str", required=True),
+ extra_args=dict(type="list", elements="str"),
+ ),
+ supports_check_mode=False,
+ )
+ arg_formats = dict(
+ extra_args=cmd_runner_fmt.as_list(),
+ )
+ django_admin_arg_order = "extra_args"
+
+
+def main():
+ DjangoCommand.execute()
+
+
+if __name__ == '__main__':
+ main()
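Editor's note: django_command delegates to Django's own admin entry point. What the module effectively wraps can be approximated with a plain subprocess call; the settings module, Python path, and virtualenv layout below are assumptions taken from the EXAMPLES block, not from the module's implementation:

    import os
    import subprocess

    env = dict(os.environ)
    # Equivalent of the module's pythonpath/venv options (illustrative paths).
    env["PYTHONPATH"] = "/home/joedoe/project/fancysite"
    python = "/home/joedoe/project/fancysite/venv/bin/python"

    # "check" corresponds to the module's `command` option;
    # --settings corresponds to the shared `settings` option.
    subprocess.run([python, "-m", "django", "check", "--settings=fancysite.settings"],
                   env=env, check=True)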
diff --git a/ansible_collections/community/general/plugins/modules/django_manage.py b/ansible_collections/community/general/plugins/modules/django_manage.py
index 114ec0353..352bfe4b5 100644
--- a/ansible_collections/community/general/plugins/modules/django_manage.py
+++ b/ansible_collections/community/general/plugins/modules/django_manage.py
@@ -28,23 +28,16 @@ options:
command:
description:
- The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation.
- - >
- V(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be
- removed in community.general 9.0.0. Use V(clearsessions) instead.
- V(collectstatic) - Collects the static files into C(STATIC_ROOT).
- V(createcachetable) - Creates the cache tables for use with the database cache backend.
- V(flush) - Removes all data from the database.
- V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
- V(migrate) - Synchronizes the database state with models and migrations.
- - >
- V(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7).
- This parameter will be removed in community.general 9.0.0. Use V(migrate) instead.
- V(test) - Runs tests for all installed apps.
- - >
- V(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be
- removed in community.general 9.0.0. Use V(check) instead.
- - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may
+ - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may
prompt for user input should be run with the C(--noinput) flag.
+ - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0.
+ See note about supported versions of Django.
type: str
required: true
project_path:
@@ -69,6 +62,7 @@ options:
virtualenv:
description:
- An optional path to a C(virtualenv) installation to use while running the manage application.
+ - The virtual environment must exist, otherwise the module will fail.
type: path
aliases: [virtual_env]
apps:
@@ -132,31 +126,24 @@ options:
aliases: [test_runner]
ack_venv_creation_deprecation:
description:
- - >-
- When a O(virtualenv) is set but the virtual environment does not exist, the current behavior is
- to create a new virtual environment. That behavior is deprecated and if that case happens it will
- generate a deprecation warning. Set this flag to V(true) to suppress the deprecation warning.
- - Please note that you will receive no further warning about this being removed until the module
- will start failing in such cases from community.general 9.0.0 on.
+ - This option no longer has any effect since community.general 9.0.0.
+ - It will be removed from community.general 11.0.0.
type: bool
version_added: 5.8.0
notes:
- >
- B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in
- community.general version 9.0.0 (estimated to be released in May 2024).
- Please notice that Django 4.1 requires Python 3.8 or greater.
- - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the O(virtualenv) parameter
- is specified. This requirement is deprecated and will be removed in community.general version 9.0.0.
- - This module will create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not already
- exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0.
- - The parameter O(virtualenv) will remain in use, but it will require the specified virtualenv to exist.
- The recommended way to create one in Ansible is by using M(ansible.builtin.pip).
+ B(ATTENTION): Support for Django releases older than 4.1 has been removed in
+ community.general version 9.0.0. While the module allows for free-form commands and
+ does not verify the version of Django being used, it is B(strongly recommended)
+ to use a more recent version of Django.
+ - Please notice that Django 4.1 requires Python 3.8 or greater.
+ - This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment
+ does not already exist at the given location. This behavior changed in community.general version 9.0.0.
+ - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
- This module assumes English error messages for the V(createcachetable) command to detect table existence,
unfortunately.
- - To be able to use the V(migrate) command with django versions < 1.7, you must have C(south) installed and added
- as an app in your settings.
- - To be able to use the V(collectstatic) command, you must have enabled staticfiles in your settings.
+ - To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings.
- Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang,
for example C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
seealso:
@@ -169,7 +156,7 @@ seealso:
- name: What Python version can I use with Django?
description: From the Django FAQ, the response to Python requirements for the framework.
link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django
-requirements: [ "virtualenv", "django" ]
+requirements: [ "django >= 4.1" ]
author:
- Alexei Znamensky (@russoz)
- Scott Anderson (@tastychutney)
@@ -178,7 +165,7 @@ author:
EXAMPLES = """
- name: Run cleanup on the application installed in django_dir
community.general.django_manage:
- command: cleanup
+ command: clearsessions
project_path: "{{ django_dir }}"
- name: Load the initial_data fixture into the application
@@ -189,7 +176,7 @@ EXAMPLES = """
- name: Run syncdb on the application
community.general.django_manage:
- command: syncdb
+ command: migrate
project_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
@@ -233,22 +220,7 @@ def _ensure_virtualenv(module):
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
- # In version 9.0.0, if the venv is not found, it should fail_json() here.
- if not module.params['ack_venv_creation_deprecation']:
- module.deprecate(
- 'The behavior of "creating the virtual environment when missing" is being '
- 'deprecated and will be removed in community.general version 9.0.0. '
- 'Set the module parameter `ack_venv_creation_deprecation: true` to '
- 'prevent this message from showing up when creating a virtualenv.',
- version='9.0.0',
- collection_name='community.general',
- )
-
- virtualenv = module.get_bin_path('virtualenv', True)
- vcmd = [virtualenv, venv_param]
- rc, out_venv, err_venv = module.run_command(vcmd)
- if rc != 0:
- _fail(module, vcmd, out_venv, err_venv)
+ module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
@@ -266,11 +238,6 @@ def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
-def syncdb_filter_output(line):
- return ("Creating table " in line) \
- or ("Installed" in line and "Installed 0 object" not in line)
-
-
def migrate_filter_output(line):
return ("Migrating forwards " in line) \
or ("Installed" in line and "Installed 0 object" not in line) \
@@ -283,13 +250,10 @@ def collectstatic_filter_output(line):
def main():
command_allowed_param_map = dict(
- cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
- syncdb=('database', ),
test=('failfast', 'testrunner', 'apps', ),
- validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
@@ -301,7 +265,6 @@ def main():
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
- 'syncdb',
'migrate',
'test',
'collectstatic',
@@ -333,7 +296,7 @@ def main():
skip=dict(type='bool'),
merge=dict(type='bool'),
link=dict(type='bool'),
- ack_venv_creation_deprecation=dict(type='bool'),
+ ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'),
),
)
@@ -342,21 +305,6 @@ def main():
project_path = module.params['project_path']
virtualenv = module.params['virtualenv']
- try:
- _deprecation = dict(
- cleanup="clearsessions",
- syncdb="migrate",
- validate="check",
- )
- module.deprecate(
- 'The command {0} has been deprecated as it is no longer supported in recent Django versions.'
- 'Please use the command {1} instead that provide similar capability.'.format(command_bin, _deprecation[command_bin]),
- version='9.0.0',
- collection_name='community.general'
- )
- except KeyError:
- pass
-
for param in specific_params:
value = module.params[param]
if value and param not in command_allowed_param_map[command_bin]:
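Editor's note: with the 9.0.0 change, _ensure_virtualenv() no longer creates a missing virtual environment; it validates the supplied path and then activates it by adjusting the process environment. A minimal sketch of that logic, assuming a conventional venv layout:

    import os

    def ensure_existing_virtualenv(module, venv_param):
        vbin = os.path.join(venv_param, "bin")
        activate = os.path.join(vbin, "activate")
        if not os.path.exists(activate):
            # New behavior: fail instead of silently creating the venv.
            module.fail_json(msg="%s does not point to a valid virtual environment" % venv_param)
        # "Activate" the venv for subsequent manage.py invocations.
        os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
        os.environ["VIRTUAL_ENV"] = venv_param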
diff --git a/ansible_collections/community/general/plugins/modules/flowdock.py b/ansible_collections/community/general/plugins/modules/flowdock.py
deleted file mode 100644
index 0e8a7461d..000000000
--- a/ansible_collections/community/general/plugins/modules/flowdock.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright 2013 Matt Coddington <coddington@gmail.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: flowdock
-author: "Matt Coddington (@mcodd)"
-short_description: Send a message to a flowdock
-description:
- - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- token:
- type: str
- description:
- - API token.
- required: true
- type:
- type: str
- description:
- - Whether to post to 'inbox' or 'chat'
- required: true
- choices: [ "inbox", "chat" ]
- msg:
- type: str
- description:
- - Content of the message
- required: true
- tags:
- type: str
- description:
- - tags of the message, separated by commas
- required: false
- external_user_name:
- type: str
- description:
- - (chat only - required) Name of the "user" sending the message
- required: false
- from_address:
- type: str
- description:
- - (inbox only - required) Email address of the message sender
- required: false
- source:
- type: str
- description:
- - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
- required: false
- subject:
- type: str
- description:
- - (inbox only - required) Subject line of the message
- required: false
- from_name:
- type: str
- description:
- - (inbox only) Name of the message sender
- required: false
- reply_to:
- type: str
- description:
- - (inbox only) Email address for replies
- required: false
- project:
- type: str
- description:
- - (inbox only) Human readable identifier for more detailed message categorization
- required: false
- link:
- type: str
- description:
- - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
- required: false
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: true
- type: bool
-
-requirements: [ ]
-'''
-
-EXAMPLES = '''
-- name: Send a message to a flowdock
- community.general.flowdock:
- type: inbox
- token: AAAAAA
- from_address: user@example.com
- source: my cool app
- msg: test from ansible
- subject: test subject
-
-- name: Send a message to a flowdock
- community.general.flowdock:
- type: chat
- token: AAAAAA
- external_user_name: testuser
- msg: test from ansible
- tags: tag1,tag2,tag3
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import fetch_url
-
-
-# ===========================================
-# Module execution.
-#
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- token=dict(required=True, no_log=True),
- msg=dict(required=True),
- type=dict(required=True, choices=["inbox", "chat"]),
- external_user_name=dict(required=False),
- from_address=dict(required=False),
- source=dict(required=False),
- subject=dict(required=False),
- from_name=dict(required=False),
- reply_to=dict(required=False),
- project=dict(required=False),
- tags=dict(required=False),
- link=dict(required=False),
- validate_certs=dict(default=True, type='bool'),
- ),
- supports_check_mode=True
- )
-
- type = module.params["type"]
- token = module.params["token"]
- if type == 'inbox':
- url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
- else:
- url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
-
- params = {}
-
- # required params
- params['content'] = module.params["msg"]
-
- # required params for the 'chat' type
- if module.params['external_user_name']:
- if type == 'inbox':
- module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
- else:
- params['external_user_name'] = module.params["external_user_name"]
- elif type == 'chat':
- module.fail_json(msg="external_user_name is required for the 'chat' type")
-
- # required params for the 'inbox' type
- for item in ['from_address', 'source', 'subject']:
- if module.params[item]:
- if type == 'chat':
- module.fail_json(msg="%s is not valid for the 'chat' type" % item)
- else:
- params[item] = module.params[item]
- elif type == 'inbox':
- module.fail_json(msg="%s is required for the 'inbox' type" % item)
-
- # optional params
- if module.params["tags"]:
- params['tags'] = module.params["tags"]
-
- # optional params for the 'inbox' type
- for item in ['from_name', 'reply_to', 'project', 'link']:
- if module.params[item]:
- if type == 'chat':
- module.fail_json(msg="%s is not valid for the 'chat' type" % item)
- else:
- params[item] = module.params[item]
-
- # If we're in check mode, just exit pretending like we succeeded
- if module.check_mode:
- module.exit_json(changed=False)
-
- # Send the data to Flowdock
- data = urlencode(params)
- response, info = fetch_url(module, url, data=data)
- if info['status'] != 200:
- module.fail_json(msg="unable to send msg: %s" % info['msg'])
-
- module.exit_json(changed=True, msg=module.params["msg"])
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
index fdb7993a5..ad2e96fd1 100644
--- a/ansible_collections/community/general/plugins/modules/gandi_livedns.py
+++ b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
@@ -25,11 +25,19 @@ attributes:
diff_mode:
support: none
options:
+ personal_access_token:
+ description:
+ - Scoped API token.
+ - One of O(personal_access_token) and O(api_key) must be specified.
+ type: str
+ version_added: 9.0.0
api_key:
description:
- Account API token.
+ - Note that this type of key is deprecated and might stop working at some point.
+ Use personal access tokens instead.
+ - One of O(personal_access_token) and O(api_key) must be specified.
type: str
- required: true
record:
description:
- Record to add.
@@ -73,7 +81,7 @@ EXAMPLES = r'''
values:
- 127.0.0.1
ttl: 7200
- api_key: dummyapitoken
+ personal_access_token: dummytoken
register: record
- name: Create a mail CNAME record to www.my.com domain
@@ -84,7 +92,7 @@ EXAMPLES = r'''
values:
- www
ttl: 7200
- api_key: dummyapitoken
+ personal_access_token: dummytoken
state: present
- name: Change its TTL
@@ -95,7 +103,7 @@ EXAMPLES = r'''
values:
- www
ttl: 10800
- api_key: dummyapitoken
+ personal_access_token: dummytoken
state: present
- name: Delete the record
@@ -103,8 +111,18 @@ EXAMPLES = r'''
domain: my.com
type: CNAME
record: mail
- api_key: dummyapitoken
+ personal_access_token: dummytoken
state: absent
+
+- name: Use a (deprecated) API Key
+ community.general.gandi_livedns:
+ domain: my.com
+ record: test
+ type: A
+ values:
+ - 127.0.0.1
+ ttl: 7200
+ api_key: dummyapikey
'''
RETURN = r'''
@@ -151,7 +169,8 @@ from ansible_collections.community.general.plugins.module_utils.gandi_livedns_ap
def main():
module = AnsibleModule(
argument_spec=dict(
- api_key=dict(type='str', required=True, no_log=True),
+ api_key=dict(type='str', no_log=True),
+ personal_access_token=dict(type='str', no_log=True),
record=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
ttl=dict(type='int'),
@@ -163,6 +182,12 @@ def main():
required_if=[
('state', 'present', ['values', 'ttl']),
],
+ mutually_exclusive=[
+ ('api_key', 'personal_access_token'),
+ ],
+ required_one_of=[
+ ('api_key', 'personal_access_token'),
+ ],
)
gandi_api = GandiLiveDNSAPI(module)
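Editor's note: with api_key now optional and personal_access_token added, the two credentials are mutually exclusive and exactly one is required. Selecting whichever was supplied could look like the sketch below; the actual header construction lives in the gandi_livedns_api module_utils, which this diff does not show, so the "bearer"/"apikey" labels are assumptions:

    def select_gandi_credential(params):
        # Only one of the two can be set, enforced by mutually_exclusive
        # and required_one_of in the argument spec above.
        pat = params.get("personal_access_token")
        api_key = params.get("api_key")
        return ("bearer", pat) if pat else ("apikey", api_key)

    # Illustrative only:
    print(select_gandi_credential({"personal_access_token": "dummytoken", "api_key": None}))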
diff --git a/ansible_collections/community/general/plugins/modules/gconftool2.py b/ansible_collections/community/general/plugins/modules/gconftool2.py
index a40304a16..db7c6dc88 100644
--- a/ansible_collections/community/general/plugins/modules/gconftool2.py
+++ b/ansible_collections/community/general/plugins/modules/gconftool2.py
@@ -123,6 +123,7 @@ class GConftool(StateModuleHelper):
],
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
diff --git a/ansible_collections/community/general/plugins/modules/gconftool2_info.py b/ansible_collections/community/general/plugins/modules/gconftool2_info.py
index 282065b95..f66e2da8f 100644
--- a/ansible_collections/community/general/plugins/modules/gconftool2_info.py
+++ b/ansible_collections/community/general/plugins/modules/gconftool2_info.py
@@ -60,6 +60,7 @@ class GConftoolInfo(ModuleHelper):
),
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
index e6163a6b6..96b3eb3fa 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_runner.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -15,17 +15,20 @@ DOCUMENTATION = '''
module: gitlab_runner
short_description: Create, modify and delete GitLab Runners
description:
- - Register, update and delete runners with the GitLab API.
+ - Register, update and delete runners on GitLab Server side with the GitLab API.
- All operations are performed using the GitLab API v4.
- - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html)
+ and U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user).
- A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
U(https://$GITLAB_URL/profile/personal_access_tokens).
- A valid registration token is required for registering a new runner.
To create shared runners, you need to ask your administrator to give you this token.
It can be found at U(https://$GITLAB_URL/admin/runners/).
+ - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through its API.
+ Once the module has created the runner, you may use the generated token to run the C(gitlab-runner register) command.
notes:
- To create a new runner at least the O(api_token), O(description) and O(api_url) options are required.
- - Runners need to have unique descriptions.
+ - Runners need to have unique descriptions, since this attribute is used as a key for idempotency.
author:
- Samy Coenen (@SamyCoenen)
- Guillaume Martinez (@Lunik)
@@ -153,7 +156,45 @@ options:
'''
EXAMPLES = '''
-- name: "Register runner"
+- name: Create an instance-level runner
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: present
+ active: true
+ tag_list: ['docker']
+ run_untagged: false
+ locked: false
+ register: runner # Register module output to run C(gitlab-runner register) command in another task
+
+- name: Create a group-level runner
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: present
+ active: true
+ tag_list: ['docker']
+ run_untagged: false
+ locked: false
+ group: top-level-group/subgroup
+ register: runner # Register module output to run C(gitlab-runner register) command in another task
+
+- name: Create a project-level runner
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: present
+ active: true
+ tag_list: ['docker']
+ run_untagged: false
+ locked: false
+ project: top-level-group/subgroup/project
+ register: runner # Register module output to run C(gitlab-runner register) command in another task
+
+- name: "Register instance-level runner with registration token (deprecated)"
community.general.gitlab_runner:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
@@ -164,6 +205,7 @@ EXAMPLES = '''
tag_list: ['docker']
run_untagged: false
locked: false
+ register: runner # Register module output to run C(gitlab-runner register) command in another task
- name: "Delete runner"
community.general.gitlab_runner:
@@ -180,7 +222,7 @@ EXAMPLES = '''
owned: true
state: absent
-- name: Register runner for a specific project
+- name: "Register a project-level runner with registration token (deprecated)"
community.general.gitlab_runner:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
@@ -188,6 +230,7 @@ EXAMPLES = '''
description: MyProject runner
state: present
project: mygroup/mysubgroup/myproject
+ register: runner # Register module output to run C(gitlab-runner register) command in another task
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/homebrew.py b/ansible_collections/community/general/plugins/modules/homebrew.py
index 5d471797a..2b60846b4 100644
--- a/ansible_collections/community/general/plugins/modules/homebrew.py
+++ b/ansible_collections/community/general/plugins/modules/homebrew.py
@@ -76,6 +76,13 @@ options:
type: list
elements: str
version_added: '0.2.0'
+ force_formula:
+ description:
+ - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)).
+ - To install a cask, use the M(community.general.homebrew_cask) module.
+ type: bool
+ default: false
+ version_added: 9.0.0
notes:
- When used with a C(loop:) each package will be processed individually,
it is much more efficient to pass the list directly to the O(name) option.
@@ -141,6 +148,12 @@ EXAMPLES = '''
community.general.homebrew:
upgrade_all: true
upgrade_options: ignore-pinned
+
+- name: Force installing a formula whose name is also a cask name
+ community.general.homebrew:
+ name: ambiguous_formula
+ state: present
+ force_formula: true
'''
RETURN = '''
@@ -166,9 +179,10 @@ changed_pkgs:
'''
import json
-import os.path
import re
+from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate
+
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, string_types
@@ -195,99 +209,8 @@ def _check_package_in_json(json_output, package_type):
class Homebrew(object):
'''A class to manage Homebrew packages.'''
- # class regexes ------------------------------------------------ {{{
- VALID_PATH_CHARS = r'''
- \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
- \s # spaces
- : # colons
- {sep} # the OS-specific path separator
- . # dots
- \- # dashes
- '''.format(sep=os.path.sep)
-
- VALID_BREW_PATH_CHARS = r'''
- \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
- \s # spaces
- {sep} # the OS-specific path separator
- . # dots
- \- # dashes
- '''.format(sep=os.path.sep)
-
- VALID_PACKAGE_CHARS = r'''
- \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
- . # dots
- / # slash (for taps)
- \+ # plusses
- \- # dashes
- : # colons (for URLs)
- @ # at-sign
- '''
-
- INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
- INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
- INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
- # /class regexes ----------------------------------------------- }}}
-
# class validations -------------------------------------------- {{{
@classmethod
- def valid_path(cls, path):
- '''
- `path` must be one of:
- - list of paths
- - a string containing only:
- - alphanumeric characters
- - dashes
- - dots
- - spaces
- - colons
- - os.path.sep
- '''
-
- if isinstance(path, string_types):
- return not cls.INVALID_PATH_REGEX.search(path)
-
- try:
- iter(path)
- except TypeError:
- return False
- else:
- paths = path
- return all(cls.valid_brew_path(path_) for path_ in paths)
-
- @classmethod
- def valid_brew_path(cls, brew_path):
- '''
- `brew_path` must be one of:
- - None
- - a string containing only:
- - alphanumeric characters
- - dashes
- - dots
- - spaces
- - os.path.sep
- '''
-
- if brew_path is None:
- return True
-
- return (
- isinstance(brew_path, string_types)
- and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
- )
-
- @classmethod
- def valid_package(cls, package):
- '''A valid package is either None or alphanumeric.'''
-
- if package is None:
- return True
-
- return (
- isinstance(package, string_types)
- and not cls.INVALID_PACKAGE_REGEX.search(package)
- )
-
- @classmethod
def valid_state(cls, state):
'''
A valid state is one of:
@@ -346,7 +269,7 @@ class Homebrew(object):
@path.setter
def path(self, path):
- if not self.valid_path(path):
+ if not HomebrewValidate.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
@@ -366,7 +289,7 @@ class Homebrew(object):
@brew_path.setter
def brew_path(self, brew_path):
- if not self.valid_brew_path(brew_path):
+ if not HomebrewValidate.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
@@ -391,7 +314,7 @@ class Homebrew(object):
@current_package.setter
def current_package(self, package):
- if not self.valid_package(package):
+ if not HomebrewValidate.valid_package(package):
self._current_package = None
self.failed = True
self.message = 'Invalid package: {0}.'.format(package)
@@ -404,7 +327,8 @@ class Homebrew(object):
def __init__(self, module, path, packages=None, state=None,
update_homebrew=False, upgrade_all=False,
- install_options=None, upgrade_options=None):
+ install_options=None, upgrade_options=None,
+ force_formula=False):
if not install_options:
install_options = list()
if not upgrade_options:
@@ -414,7 +338,8 @@ class Homebrew(object):
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all,
install_options=install_options,
- upgrade_options=upgrade_options,)
+ upgrade_options=upgrade_options,
+ force_formula=force_formula)
self._prep()
@@ -476,7 +401,7 @@ class Homebrew(object):
# checks ------------------------------------------------------- {{{
def _current_package_is_installed(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -487,17 +412,19 @@ class Homebrew(object):
"--json=v2",
self.current_package,
]
+ if self.force_formula:
+ cmd.append("--formula")
rc, out, err = self.module.run_command(cmd)
- if err:
+ if rc != 0:
self.failed = True
- self.message = err.strip()
+ self.message = err.strip() or ("Unknown failure with exit code %d" % rc)
raise HomebrewException(self.message)
data = json.loads(out)
return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks")
def _current_package_is_outdated(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
return False
rc, out, err = self.module.run_command([
@@ -509,7 +436,7 @@ class Homebrew(object):
return rc != 0
def _current_package_is_installed_from_head(self):
- if not Homebrew.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
return False
elif not self._current_package_is_installed():
return False
@@ -607,7 +534,7 @@ class Homebrew(object):
# installed ------------------------------ {{{
def _install_current_package(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -632,10 +559,15 @@ class Homebrew(object):
else:
head = None
+ if self.force_formula:
+ formula = '--formula'
+ else:
+ formula = None
+
opts = (
[self.brew_path, 'install']
+ self.install_options
- + [self.current_package, head]
+ + [self.current_package, head, formula]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
@@ -663,7 +595,7 @@ class Homebrew(object):
def _upgrade_current_package(self):
command = 'upgrade'
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -734,7 +666,7 @@ class Homebrew(object):
# uninstalled ---------------------------- {{{
def _uninstall_current_package(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -783,7 +715,7 @@ class Homebrew(object):
# linked --------------------------------- {{{
def _link_current_package(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -830,7 +762,7 @@ class Homebrew(object):
# unlinked ------------------------------- {{{
def _unlink_current_package(self):
- if not self.valid_package(self.current_package):
+ if not HomebrewValidate.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
@@ -919,7 +851,11 @@ def main():
default=None,
type='list',
elements='str',
- )
+ ),
+ force_formula=dict(
+ default=False,
+ type='bool',
+ ),
),
supports_check_mode=True,
)
@@ -951,6 +887,7 @@ def main():
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
+ force_formula = p['force_formula']
update_homebrew = p['update_homebrew']
if not update_homebrew:
module.run_command_environ_update.update(
@@ -967,7 +904,7 @@ def main():
brew = Homebrew(module=module, path=path, packages=packages,
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all, install_options=install_options,
- upgrade_options=upgrade_options)
+ upgrade_options=upgrade_options, force_formula=force_formula)
(failed, changed, message) = brew.run()
changed_pkgs = brew.changed_pkgs
unchanged_pkgs = brew.unchanged_pkgs
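
For context on the force_formula handling above, here is a minimal standalone sketch of how the optional '--formula' switch and the unset 'head' placeholder get folded into a single argv list; the helper name below is hypothetical, not the module's code.

    def build_brew_install(brew_path, package, install_options=None,
                           head=None, force_formula=False):
        opts = (
            [brew_path, "install"]
            + (install_options or [])
            + [package, head, "--formula" if force_formula else None]
        )
        # Mirrors the module's "[opt for opt in opts if opt]" filter, which
        # drops the None placeholders before the command is run.
        return [opt for opt in opts if opt]

    print(build_brew_install("/opt/homebrew/bin/brew", "ambiguous_formula",
                             install_options=["--quiet"], force_formula=True))
    # ['/opt/homebrew/bin/brew', 'install', '--quiet', 'ambiguous_formula', '--formula']
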
diff --git a/ansible_collections/community/general/plugins/modules/homebrew_cask.py b/ansible_collections/community/general/plugins/modules/homebrew_cask.py
index c992693b6..dc9aea5db 100644
--- a/ansible_collections/community/general/plugins/modules/homebrew_cask.py
+++ b/ansible_collections/community/general/plugins/modules/homebrew_cask.py
@@ -158,6 +158,7 @@ import re
import tempfile
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.basic import AnsibleModule
@@ -183,23 +184,6 @@ class HomebrewCask(object):
'''A class to manage Homebrew casks.'''
# class regexes ------------------------------------------------ {{{
- VALID_PATH_CHARS = r'''
- \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
- \s # spaces
- : # colons
- {sep} # the OS-specific path separator
- . # dots
- \- # dashes
- '''.format(sep=os.path.sep)
-
- VALID_BREW_PATH_CHARS = r'''
- \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
- \s # spaces
- {sep} # the OS-specific path separator
- . # dots
- \- # dashes
- '''.format(sep=os.path.sep)
-
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
@@ -208,59 +192,11 @@ class HomebrewCask(object):
@ # at symbol
'''
- INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
- INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
- def valid_path(cls, path):
- '''
- `path` must be one of:
- - list of paths
- - a string containing only:
- - alphanumeric characters
- - dashes
- - dots
- - spaces
- - colons
- - os.path.sep
- '''
-
- if isinstance(path, (string_types)):
- return not cls.INVALID_PATH_REGEX.search(path)
-
- try:
- iter(path)
- except TypeError:
- return False
- else:
- paths = path
- return all(cls.valid_brew_path(path_) for path_ in paths)
-
- @classmethod
- def valid_brew_path(cls, brew_path):
- '''
- `brew_path` must be one of:
- - None
- - a string containing only:
- - alphanumeric characters
- - dashes
- - dots
- - spaces
- - os.path.sep
- '''
-
- if brew_path is None:
- return True
-
- return (
- isinstance(brew_path, string_types)
- and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
- )
-
- @classmethod
def valid_cask(cls, cask):
'''A valid cask is either None or alphanumeric + backslashes.'''
@@ -321,7 +257,7 @@ class HomebrewCask(object):
@path.setter
def path(self, path):
- if not self.valid_path(path):
+ if not HomebrewValidate.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
@@ -341,7 +277,7 @@ class HomebrewCask(object):
@brew_path.setter
def brew_path(self, brew_path):
- if not self.valid_brew_path(brew_path):
+ if not HomebrewValidate.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
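
The validation helpers removed from both modules now live in module_utils/homebrew.py as HomebrewValidate. A hedged, simplified sketch of the shared idea: compile a regex of characters that are not allowed, then reject any value containing one. The names and character set here are illustrative, not the helper's exact code.

    import os.path
    import re

    def invalid_chars_regex(allowed_chars):
        # Matches any single character outside the allowed set.
        return re.compile(r"[^{0}]".format(allowed_chars))

    VALID_BREW_PATH_CHARS = r"\w\s{sep}.\-".format(sep=re.escape(os.path.sep))
    INVALID_BREW_PATH_REGEX = invalid_chars_regex(VALID_BREW_PATH_CHARS)

    def valid_brew_path(brew_path):
        if brew_path is None:
            return True
        return isinstance(brew_path, str) and not INVALID_BREW_PATH_REGEX.search(brew_path)

    print(valid_brew_path("/opt/homebrew/bin/brew"))  # True
    print(valid_brew_path("/bad;path"))               # False: ';' is not allowed
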
diff --git a/ansible_collections/community/general/plugins/modules/hponcfg.py b/ansible_collections/community/general/plugins/modules/hponcfg.py
index 612a20d92..206565a23 100644
--- a/ansible_collections/community/general/plugins/modules/hponcfg.py
+++ b/ansible_collections/community/general/plugins/modules/hponcfg.py
@@ -98,6 +98,7 @@ class HPOnCfg(ModuleHelper):
verbose=cmd_runner_fmt.as_bool("-v"),
minfw=cmd_runner_fmt.as_opt_val("-m"),
)
+ use_old_vardict = False
def __run__(self):
runner = CmdRunner(
diff --git a/ansible_collections/community/general/plugins/modules/installp.py b/ansible_collections/community/general/plugins/modules/installp.py
index 4b5a6949c..1531d2cad 100644
--- a/ansible_collections/community/general/plugins/modules/installp.py
+++ b/ansible_collections/community/general/plugins/modules/installp.py
@@ -106,7 +106,7 @@ def _check_new_pkg(module, package, repository_path):
if os.path.isdir(repository_path):
installp_cmd = module.get_bin_path('installp', True)
- rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path])
if rc != 0:
module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
@@ -142,7 +142,7 @@ def _check_installed_pkg(module, package, repository_path):
"""
lslpp_cmd = module.get_bin_path('lslpp', True)
- rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+ rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", "%s*" % (package, )])
if rc == 1:
package_state = ' '.join(err.split()[-2:])
@@ -173,7 +173,7 @@ def remove(module, installp_cmd, packages):
if pkg_check:
if not module.check_mode:
- rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ rc, remove_out, err = module.run_command([installp_cmd, "-u", package])
if rc != 0:
module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
remove_count += 1
@@ -202,8 +202,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license):
already_installed_pkgs = {}
accept_license_param = {
- True: '-Y',
- False: '',
+ True: ['-Y'],
+ False: [],
}
# Validate if package exists on repository path.
@@ -230,7 +230,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license):
else:
if not module.check_mode:
- rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ rc, out, err = module.run_command(
+ [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package])
if rc != 0:
module.fail_json(msg="Failed to run installp", rc=rc, err=err)
installed_pkgs.append(package)
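
The installp changes follow the same pattern used throughout this patch: run_command() receives a list, so each argument reaches the program as its own argv entry and is never re-parsed by a shell. A small illustration using subprocess as a stand-in for AnsibleModule.run_command (the fileset name is made up):

    import subprocess

    package = "openssh.base"                  # hypothetical fileset name
    cmd = ["lslpp", "-lcq", package + "*"]    # argv list: no shell, no quoting needed
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
        print(result.returncode, result.stdout)
    except FileNotFoundError:
        # lslpp exists only on AIX; the point here is the argv construction.
        print("lslpp not available on this host")
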
diff --git a/ansible_collections/community/general/plugins/modules/irc.py b/ansible_collections/community/general/plugins/modules/irc.py
index 00ff299ee..e40ba2d0b 100644
--- a/ansible_collections/community/general/plugins/modules/irc.py
+++ b/ansible_collections/community/general/plugins/modules/irc.py
@@ -85,8 +85,10 @@ options:
was exclusively called O(use_ssl). The latter is now an alias of O(use_tls).
- B(Note:) for security reasons, you should always set O(use_tls=true) and
O(validate_certs=true) whenever possible.
+ - The option currently defaults to V(false). The default has been B(deprecated) and will
+ change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly
+ set this option to a value (preferably V(true)).
type: bool
- default: false
aliases:
- use_ssl
part:
@@ -108,7 +110,9 @@ options:
if the network between Ansible and the IRC server is known to be safe.
- B(Note:) for security reasons, you should always set O(use_tls=true) and
O(validate_certs=true) whenever possible.
- default: false
+ - The option currently defaults to V(false). The default has been B(deprecated) and will
+ change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly
+ set this option to a value (preferably V(true)).
type: bool
version_added: 8.1.0
@@ -309,8 +313,8 @@ def main():
passwd=dict(no_log=True),
timeout=dict(type='int', default=30),
part=dict(type='bool', default=True),
- use_tls=dict(type='bool', default=False, aliases=['use_ssl']),
- validate_certs=dict(type='bool', default=False),
+ use_tls=dict(type='bool', aliases=['use_ssl']),
+ validate_certs=dict(type='bool'),
),
supports_check_mode=True,
required_one_of=[['channel', 'nick_to']]
@@ -334,6 +338,25 @@ def main():
style = module.params["style"]
validate_certs = module.params["validate_certs"]
+ if use_tls is None:
+ module.deprecate(
+ 'The default of use_tls will change to true in community.general 10.0.0.'
+ ' Set a value now (preferably true, if possible) to avoid the deprecation warning.',
+ version='10.0.0',
+ collection_name='community.general',
+ )
+ use_tls = False
+
+ if validate_certs is None:
+ if use_tls:
+ module.deprecate(
+ 'The default of validate_certs will change to true in community.general 10.0.0.'
+ ' Set a value now (preferably true, if possible) to avoid the deprecation warning.',
+ version='10.0.0',
+ collection_name='community.general',
+ )
+ validate_certs = False
+
try:
send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style)
except Exception as e:
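
The irc changes apply the usual pattern for deprecating an implicit default: drop the default from the argument spec, warn when the user relied on it, and fall back to the old value for now. A hedged standalone sketch of that pattern; the helper name is hypothetical:

    def resolve_bool_with_deprecation(value, old_default, warn):
        if value is None:
            warn("The default of this option will change in a future release; "
                 "set it explicitly to avoid this warning.")
            return old_default
        return value

    warnings = []
    use_tls = resolve_bool_with_deprecation(None, False, warnings.append)
    print(use_tls, warnings)   # False ['The default of this option will change ...']
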
diff --git a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
index b5bd90403..224b5bba8 100644
--- a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
+++ b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
@@ -67,6 +67,7 @@ class Blacklist(StateModuleHelper):
),
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client.py b/ansible_collections/community/general/plugins/modules/keycloak_client.py
index cd9c60bac..3628e5a51 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_client.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_client.py
@@ -775,8 +775,9 @@ def sanitize_cr(clientrep):
if 'secret' in result:
result['secret'] = 'no_log'
if 'attributes' in result:
- if 'saml.signing.private.key' in result['attributes']:
- result['attributes']['saml.signing.private.key'] = 'no_log'
+ attributes = result['attributes']
+ if isinstance(attributes, dict) and 'saml.signing.private.key' in attributes:
+ attributes['saml.signing.private.key'] = 'no_log'
return normalise_cr(result)
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
index 588f553e8..2eca3a06d 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
@@ -437,7 +437,7 @@ def sanitize(idp):
idpcopy = deepcopy(idp)
if 'config' in idpcopy:
if 'clientSecret' in idpcopy['config']:
- idpcopy['clientSecret'] = '**********'
+ idpcopy['config']['clientSecret'] = '**********'
return idpcopy
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
index fee0d1265..f87ef936c 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
@@ -719,6 +719,9 @@ def sanitize(comp):
compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items())
if 'bindCredential' in compcopy['config']:
compcopy['config']['bindCredential'] = '**********'
+ # an empty string is valid for krbPrincipalAttribute but is filtered out in the diff
+ if 'krbPrincipalAttribute' not in compcopy['config']:
+ compcopy['config']['krbPrincipalAttribute'] = ''
if 'mappers' in compcopy:
for mapper in compcopy['mappers']:
if 'config' in mapper:
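
The three keycloak fixes above all harden how secrets are masked in nested representations before they are returned or logged. A hedged sketch of the shared pattern, assuming a generic sanitize() helper and illustrative key names:

    from copy import deepcopy

    SENSITIVE_KEYS = ("clientSecret", "bindCredential", "saml.signing.private.key")

    def sanitize(representation):
        # Work on a copy and only mask keys that are actually present,
        # guarding against 'config' not being a dict at all.
        result = deepcopy(representation)
        config = result.get("config")
        if isinstance(config, dict):
            for key in SENSITIVE_KEYS:
                if key in config:
                    config[key] = "**********"
        return result

    print(sanitize({"config": {"clientSecret": "s3cret", "vendor": "other"}}))
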
diff --git a/ansible_collections/community/general/plugins/modules/ldap_search.py b/ansible_collections/community/general/plugins/modules/ldap_search.py
index 45744e634..7958f86e0 100644
--- a/ansible_collections/community/general/plugins/modules/ldap_search.py
+++ b/ansible_collections/community/general/plugins/modules/ldap_search.py
@@ -44,6 +44,8 @@ options:
type: str
description:
- The LDAP scope to use.
+ - V(subordinate) requires the LDAPv3 subordinate feature extension.
+ - V(children) is equivalent to a "subtree" scope.
filter:
default: '(objectClass=*)'
type: str
diff --git a/ansible_collections/community/general/plugins/modules/locale_gen.py b/ansible_collections/community/general/plugins/modules/locale_gen.py
index 0dd76c9ab..fe501e023 100644
--- a/ansible_collections/community/general/plugins/modules/locale_gen.py
+++ b/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -79,6 +79,7 @@ class LocaleGen(StateModuleHelper):
),
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
self.vars.set("ubuntu_mode", False)
diff --git a/ansible_collections/community/general/plugins/modules/lvg.py b/ansible_collections/community/general/plugins/modules/lvg.py
index 8a6384369..7ff7e3a2e 100644
--- a/ansible_collections/community/general/plugins/modules/lvg.py
+++ b/ansible_collections/community/general/plugins/modules/lvg.py
@@ -179,7 +179,7 @@ def parse_vgs(data):
def find_mapper_device_name(module, dm_device):
dmsetup_cmd = module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
- rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ rc, dm_name, err = module.run_command([dmsetup_cmd, "info", "-C", "--noheadings", "-o", "name", dm_device])
if rc != 0:
module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
mapper_device = mapper_prefix + dm_name.rstrip()
@@ -204,7 +204,7 @@ def find_vg(module, vg):
if not vg:
return None
vgs_cmd = module.get_bin_path('vgs', True)
- dummy, current_vgs, dummy = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd, check_rc=True)
+ dummy, current_vgs, dummy = module.run_command([vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True)
vgs = parse_vgs(current_vgs)
@@ -431,10 +431,10 @@ def main():
for x in itertools.chain(dev_list, module.params['pvs'])
)
pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
- pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ pvs_filter = ["--select", "{0} || {1}".format(pvs_filter_pv_name, pvs_filter_vg_name)]
else:
- pvs_filter = ''
- rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ pvs_filter = []
+ rc, current_pvs, err = module.run_command([pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter)
if rc != 0:
module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
@@ -473,7 +473,7 @@ def main():
if this_vg['lv_count'] == 0 or force:
# remove VG
vgremove_cmd = module.get_bin_path('vgremove', True)
- rc, dummy, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ rc, dummy, err = module.run_command([vgremove_cmd, "--force", vg])
if rc == 0:
module.exit_json(changed=True)
else:
@@ -509,7 +509,6 @@ def main():
changed = True
else:
if devs_to_add:
- devs_to_add_string = ' '.join(devs_to_add)
# create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in devs_to_add:
@@ -520,21 +519,20 @@ def main():
module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
# add PV to our VG
vgextend_cmd = module.get_bin_path('vgextend', True)
- rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ rc, dummy, err = module.run_command([vgextend_cmd, vg] + devs_to_add)
if rc == 0:
changed = True
else:
- module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, ' '.join(devs_to_add)), rc=rc, err=err)
# remove some PV from our VG
if devs_to_remove:
- devs_to_remove_string = ' '.join(devs_to_remove)
vgreduce_cmd = module.get_bin_path('vgreduce', True)
- rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ rc, dummy, err = module.run_command([vgreduce_cmd, "--force", vg] + devs_to_remove)
if rc == 0:
changed = True
else:
- module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, ' '.join(devs_to_remove)), rc=rc, err=err)
module.exit_json(changed=changed)
diff --git a/ansible_collections/community/general/plugins/modules/lvol.py b/ansible_collections/community/general/plugins/modules/lvol.py
index a2a870260..3a2f5c7cd 100644
--- a/ansible_collections/community/general/plugins/modules/lvol.py
+++ b/ansible_collections/community/general/plugins/modules/lvol.py
@@ -236,6 +236,7 @@ EXAMPLES = '''
'''
import re
+import shlex
from ansible.module_utils.basic import AnsibleModule
@@ -281,7 +282,7 @@ def parse_vgs(data):
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
- rc, out, err = module.run_command("%s version" % (ver_cmd))
+ rc, out, err = module.run_command([ver_cmd, "version"])
if rc != 0:
return None
m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
@@ -320,14 +321,14 @@ def main():
module.fail_json(msg="Failed to get LVM version number")
version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
if version_found >= version_yesopt:
- yesopt = "--yes"
+ yesopt = ["--yes"]
else:
- yesopt = ""
+ yesopt = []
vg = module.params['vg']
lv = module.params['lv']
size = module.params['size']
- opts = module.params['opts']
+ opts = shlex.split(module.params['opts'] or '')
state = module.params['state']
force = module.boolean(module.params['force'])
shrink = module.boolean(module.params['shrink'])
@@ -338,21 +339,13 @@ def main():
size_unit = 'm'
size_operator = None
snapshot = module.params['snapshot']
- pvs = module.params['pvs']
-
- if pvs is None:
- pvs = ""
- else:
- pvs = " ".join(pvs)
-
- if opts is None:
- opts = ""
+ pvs = module.params['pvs'] or []
# Add --test option when running in check-mode
if module.check_mode:
- test_opt = ' --test'
+ test_opt = ['--test']
else:
- test_opt = ''
+ test_opt = []
if size:
# LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing
@@ -400,7 +393,7 @@ def main():
# Get information on volume group requested
vgs_cmd = module.get_bin_path("vgs", required=True)
rc, current_vgs, err = module.run_command(
- "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg))
+ [vgs_cmd, "--noheadings", "--nosuffix", "-o", "vg_name,size,free,vg_extent_size", "--units", unit.lower(), "--separator", ";", vg])
if rc != 0:
if state == 'absent':
@@ -414,7 +407,7 @@ def main():
# Get information on logical volume requested
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
- "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg))
+ [lvs_cmd, "-a", "--noheadings", "--nosuffix", "-o", "lv_name,size,lv_attr", "--units", unit.lower(), "--separator", ";", vg])
if rc != 0:
if state == 'absent':
@@ -474,20 +467,23 @@ def main():
# create LV
lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ cmd = [lvcreate_cmd] + test_opt + yesopt
if snapshot is not None:
if size:
- cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
- else:
- cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
- elif thinpool and lv:
- if size_opt == 'l':
- module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
- size_opt = 'V'
- cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
- elif thinpool and not lv:
- cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)]
+ cmd += ["-s", "-n", snapshot] + opts + ["%s/%s" % (vg, lv)]
+ elif thinpool:
+ if lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd += ["-n", lv]
+ cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)]
+ cmd += opts + ["-T", "%s/%s" % (vg, thinpool)]
else:
- cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ cmd += ["-n", lv]
+ cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)]
+ cmd += opts + [vg] + pvs
rc, dummy, err = module.run_command(cmd)
if rc == 0:
changed = True
@@ -499,7 +495,7 @@ def main():
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
- rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ rc, dummy, err = module.run_command([lvremove_cmd] + test_opt + ["--force", "%s/%s" % (vg, this_lv['name'])])
if rc == 0:
module.exit_json(changed=True)
else:
@@ -527,7 +523,7 @@ def main():
if this_lv['size'] < size_requested:
if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])):
- tool = module.get_bin_path("lvextend", required=True)
+ tool = [module.get_bin_path("lvextend", required=True)]
else:
module.fail_json(
msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
@@ -539,16 +535,17 @@ def main():
elif not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name']))
else:
- tool = module.get_bin_path("lvreduce", required=True)
- tool = '%s %s' % (tool, '--force')
+ tool = [module.get_bin_path("lvreduce", required=True), '--force']
if tool:
if resizefs:
- tool = '%s %s' % (tool, '--resizefs')
+ tool += ['--resizefs']
+ cmd = tool + test_opt
if size_operator:
- cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs)
+ cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)]
else:
- cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)]
+ cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
@@ -566,23 +563,24 @@ def main():
# resize LV based on absolute values
tool = None
if float(size) > this_lv['size'] or size_operator == '+':
- tool = module.get_bin_path("lvextend", required=True)
+ tool = [module.get_bin_path("lvextend", required=True)]
elif shrink and float(size) < this_lv['size'] or size_operator == '-':
if float(size) == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=true." % (this_lv['name']))
else:
- tool = module.get_bin_path("lvreduce", required=True)
- tool = '%s %s' % (tool, '--force')
+ tool = [module.get_bin_path("lvreduce", required=True), '--force']
if tool:
if resizefs:
- tool = '%s %s' % (tool, '--resizefs')
+ tool += ['--resizefs']
+ cmd = tool + test_opt
if size_operator:
- cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs)
+ cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)]
else:
- cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)]
+ cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
@@ -598,14 +596,14 @@ def main():
if this_lv is not None:
if active:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
- rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ rc, dummy, err = module.run_command([lvchange_cmd, "-ay", "%s/%s" % (vg, this_lv['name'])])
if rc == 0:
module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
else:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
- rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ rc, dummy, err = module.run_command([lvchange_cmd, "-an", "%s/%s" % (vg, this_lv['name'])])
if rc == 0:
module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
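
The lvol rewrite replaces format-string commands with argv lists: optional flag groups become possibly-empty lists, free-form opts are tokenised with shlex.split(), and the pieces are concatenated. A hedged, simplified sketch of that assembly (hypothetical helper, fixed '-L' size flag):

    import shlex

    def build_lvcreate(lvcreate_cmd, vg, lv, size, size_unit="g",
                       opts="", pvs=None, check_mode=False, yes=True):
        test_opt = ["--test"] if check_mode else []
        yesopt = ["--yes"] if yes else []
        cmd = [lvcreate_cmd] + test_opt + yesopt
        cmd += ["-n", lv, "-L", "%s%s" % (size, size_unit)]
        cmd += shlex.split(opts or "") + [vg] + (pvs or [])
        return cmd

    print(build_lvcreate("/sbin/lvcreate", "vg_data", "lv_app", 10,
                         opts="--type raid1 -m 1", pvs=["/dev/sdb", "/dev/sdc"]))
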
diff --git a/ansible_collections/community/general/plugins/modules/lxd_container.py b/ansible_collections/community/general/plugins/modules/lxd_container.py
index b82e2be9b..f44523a75 100644
--- a/ansible_collections/community/general/plugins/modules/lxd_container.py
+++ b/ansible_collections/community/general/plugins/modules/lxd_container.py
@@ -400,7 +400,7 @@ EXAMPLES = '''
protocol: simplestreams
type: image
mode: pull
- server: https://images.linuxcontainers.org
+ server: [...] # URL to the image server
alias: debian/11
timeout: 600
'''
diff --git a/ansible_collections/community/general/plugins/modules/macports.py b/ansible_collections/community/general/plugins/modules/macports.py
index e81fb9142..cd620687d 100644
--- a/ansible_collections/community/general/plugins/modules/macports.py
+++ b/ansible_collections/community/general/plugins/modules/macports.py
@@ -111,7 +111,7 @@ from ansible.module_utils.basic import AnsibleModule
def selfupdate(module, port_path):
""" Update Macports and the ports tree. """
- rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+ rc, out, err = module.run_command([port_path, "-v", "selfupdate"])
if rc == 0:
updated = any(
@@ -135,7 +135,7 @@ def selfupdate(module, port_path):
def upgrade(module, port_path):
""" Upgrade outdated ports. """
- rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+ rc, out, err = module.run_command([port_path, "upgrade", "outdated"])
# rc is 1 when nothing to upgrade so check stdout first.
if out.strip() == "Nothing to upgrade.":
@@ -182,7 +182,7 @@ def remove_ports(module, port_path, ports, stdout, stderr):
if not query_port(module, port_path, port):
continue
- rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+ rc, out, err = module.run_command([port_path, "uninstall", port])
stdout += out
stderr += err
if query_port(module, port_path, port):
@@ -206,7 +206,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr):
if query_port(module, port_path, port):
continue
- rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+ rc, out, err = module.run_command([port_path, "install", port, variant])
stdout += out
stderr += err
if not query_port(module, port_path, port):
@@ -232,7 +232,7 @@ def activate_ports(module, port_path, ports, stdout, stderr):
if query_port(module, port_path, port, state="active"):
continue
- rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+ rc, out, err = module.run_command([port_path, "activate", port])
stdout += out
stderr += err
@@ -259,7 +259,7 @@ def deactivate_ports(module, port_path, ports, stdout, stderr):
if not query_port(module, port_path, port, state="active"):
continue
- rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+ rc, out, err = module.run_command([port_path, "deactivate", port])
stdout += out
stderr += err
if query_port(module, port_path, port, state="active"):
diff --git a/ansible_collections/community/general/plugins/modules/mksysb.py b/ansible_collections/community/general/plugins/modules/mksysb.py
index 8272dbf7d..1280f04d5 100644
--- a/ansible_collections/community/general/plugins/modules/mksysb.py
+++ b/ansible_collections/community/general/plugins/modules/mksysb.py
@@ -138,6 +138,7 @@ class MkSysB(ModuleHelper):
backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"),
combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])),
)
+ use_old_vardict = False
def __init_module__(self):
if not os.path.isdir(self.vars.storage_path):
diff --git a/ansible_collections/community/general/plugins/modules/opkg.py b/ansible_collections/community/general/plugins/modules/opkg.py
index 757c88c5d..2f9794ab8 100644
--- a/ansible_collections/community/general/plugins/modules/opkg.py
+++ b/ansible_collections/community/general/plugins/modules/opkg.py
@@ -127,6 +127,7 @@ class Opkg(StateModuleHelper):
executable=dict(type="path"),
),
)
+ use_old_vardict = False
def __init_module__(self):
self.vars.set("install_c", 0, output=False, change=True)
diff --git a/ansible_collections/community/general/plugins/modules/parted.py b/ansible_collections/community/general/plugins/modules/parted.py
index 382e47a47..b3616a8ec 100644
--- a/ansible_collections/community/general/plugins/modules/parted.py
+++ b/ansible_collections/community/general/plugins/modules/parted.py
@@ -480,12 +480,12 @@ def get_device_info(device, unit):
if label_needed:
return get_unlabeled_device_info(device, unit)
- command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ command = [parted_exec, "-s", "-m", device, "--", "unit", unit, "print"]
rc, out, err = module.run_command(command)
if rc != 0 and 'unrecognised disk label' not in err:
module.fail_json(msg=(
"Error while getting device information with parted "
- "script: '%s'" % command),
+ "script: '%s'" % " ".join(command)),
rc=rc, out=out, err=err
)
@@ -506,7 +506,7 @@ def check_parted_label(device):
return False
# Older parted versions return a message in the stdout and RC > 0.
- rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ rc, out, err = module.run_command([parted_exec, "-s", "-m", device, "print"])
if rc != 0 and 'unrecognised disk label' in out.lower():
return True
@@ -546,7 +546,7 @@ def parted_version():
"""
global module, parted_exec # pylint: disable=global-variable-not-assigned
- rc, out, err = module.run_command("%s --version" % parted_exec)
+ rc, out, err = module.run_command([parted_exec, "--version"])
if rc != 0:
module.fail_json(
msg="Failed to get parted version.", rc=rc, out=out, err=err
@@ -580,6 +580,7 @@ def parted(script, device, align):
script_option = '-s'
if script and not module.check_mode:
+ # TODO: convert run_command() argument to list!
command = "%s %s -m %s %s -- %s" % (parted_exec, script_option, align_option, device, script)
rc, out, err = module.run_command(command)
diff --git a/ansible_collections/community/general/plugins/modules/pipx.py b/ansible_collections/community/general/plugins/modules/pipx.py
index 705cc71a7..e82e4c32a 100644
--- a/ansible_collections/community/general/plugins/modules/pipx.py
+++ b/ansible_collections/community/general/plugins/modules/pipx.py
@@ -201,6 +201,7 @@ class PipX(StateModuleHelper):
],
supports_check_mode=True,
)
+ use_old_vardict = False
def _retrieve_installed(self):
def process_list(rc, out, err):
diff --git a/ansible_collections/community/general/plugins/modules/pipx_info.py b/ansible_collections/community/general/plugins/modules/pipx_info.py
index 34f9681b0..992ca7941 100644
--- a/ansible_collections/community/general/plugins/modules/pipx_info.py
+++ b/ansible_collections/community/general/plugins/modules/pipx_info.py
@@ -150,6 +150,7 @@ class PipXInfo(ModuleHelper):
),
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
if self.vars.executable:
diff --git a/ansible_collections/community/general/plugins/modules/pkg5.py b/ansible_collections/community/general/plugins/modules/pkg5.py
index c4aace9f2..08fa9272f 100644
--- a/ansible_collections/community/general/plugins/modules/pkg5.py
+++ b/ansible_collections/community/general/plugins/modules/pkg5.py
@@ -54,6 +54,12 @@ options:
- Refresh publishers before execution.
type: bool
default: true
+ verbose:
+ description:
+ - Set to V(true) to disable quiet execution.
+ type: bool
+ default: false
+ version_added: 9.0.0
'''
EXAMPLES = '''
- name: Install Vim
@@ -90,6 +96,7 @@ def main():
accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
be_name=dict(type='str'),
refresh=dict(type='bool', default=True),
+ verbose=dict(type='bool', default=False),
),
supports_check_mode=True,
)
@@ -156,9 +163,15 @@ def ensure(module, state, packages, params):
else:
no_refresh = ['--no-refresh']
+ if params['verbose']:
+ verbosity = []
+ else:
+ verbosity = ['-q']
+
to_modify = list(filter(behaviour[state]['filter'], packages))
if to_modify:
- rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ rc, out, err = module.run_command(
+ ['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + verbosity + ['--'] + to_modify)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
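
The new pkg5 verbose option only decides whether '-q' is appended to the pkg invocation. A hedged sketch of the flag assembly as a pure function (the name is illustrative):

    def pkg_args(subcommand, packages, verbose=False, dry_run=False):
        verbosity = [] if verbose else ["-q"]
        return (["pkg", subcommand]
                + (["-n"] if dry_run else [])
                + verbosity + ["--"] + list(packages))

    print(pkg_args("install", ["vim"], verbose=True, dry_run=True))
    # ['pkg', 'install', '-n', '--', 'vim']
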
diff --git a/ansible_collections/community/general/plugins/modules/pkgin.py b/ansible_collections/community/general/plugins/modules/pkgin.py
index 5b2e478b8..8b29655d3 100644
--- a/ansible_collections/community/general/plugins/modules/pkgin.py
+++ b/ansible_collections/community/general/plugins/modules/pkgin.py
@@ -145,18 +145,18 @@ def query_package(module, name):
"""
# test whether '-p' (parsable) flag is supported.
- rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+ rc, out, err = module.run_command([PKGIN_PATH, "-p", "-v"])
if rc == 0:
- pflag = '-p'
+ pflag = ['-p']
splitchar = ';'
else:
- pflag = ''
+ pflag = []
splitchar = ' '
# Use "pkgin search" to find the package. The regular expression will
# only match on the complete name.
- rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+ rc, out, err = module.run_command([PKGIN_PATH] + pflag + ["search", "^%s$" % name])
# rc will not be 0 unless the search was a success
if rc == 0:
@@ -234,22 +234,19 @@ def format_pkgin_command(module, command, package=None):
# an empty string. Some commands (e.g. 'update') will ignore extra
# arguments, however this behaviour cannot be relied on for others.
if package is None:
- package = ""
+ packages = []
+ else:
+ packages = [package]
if module.params["force"]:
- force = "-F"
+ force = ["-F"]
else:
- force = ""
-
- vars = {"pkgin": PKGIN_PATH,
- "command": command,
- "package": package,
- "force": force}
+ force = []
if module.check_mode:
- return "%(pkgin)s -n %(command)s %(package)s" % vars
+ return [PKGIN_PATH, "-n", command] + packages
else:
- return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+ return [PKGIN_PATH, "-y"] + force + [command] + packages
def remove_packages(module, packages):
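
A hedged, standalone rendering of the command builder above: it returns an argv list, inserts '-n' in check mode, and adds '-F' only when forcing (no AnsibleModule involved):

    def format_pkgin_command(pkgin_path, command, package=None,
                             force=False, check_mode=False):
        packages = [] if package is None else [package]
        if check_mode:
            return [pkgin_path, "-n", command] + packages
        return [pkgin_path, "-y"] + (["-F"] if force else []) + [command] + packages

    print(format_pkgin_command("/opt/pkg/bin/pkgin", "install", "tmux", force=True))
    # ['/opt/pkg/bin/pkgin', '-y', '-F', 'install', 'tmux']
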
diff --git a/ansible_collections/community/general/plugins/modules/portinstall.py b/ansible_collections/community/general/plugins/modules/portinstall.py
index e263b7181..59dafb1eb 100644
--- a/ansible_collections/community/general/plugins/modules/portinstall.py
+++ b/ansible_collections/community/general/plugins/modules/portinstall.py
@@ -79,12 +79,13 @@ def query_package(module, name):
if pkg_info_path:
pkgng = False
pkg_glob_path = module.get_bin_path('pkg_glob', True)
+ # TODO: convert run_command() argument to list!
rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+ pkg_info_path = [pkg_info_path]
else:
pkgng = True
- pkg_info_path = module.get_bin_path('pkg', True)
- pkg_info_path = pkg_info_path + " info"
- rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+ pkg_info_path = [module.get_bin_path('pkg', True), "info"]
+ rc, out, err = module.run_command(pkg_info_path + [name])
found = rc == 0
@@ -94,10 +95,7 @@ def query_package(module, name):
# some package is installed
name_without_digits = re.sub('[0-9]', '', name)
if name != name_without_digits:
- if pkgng:
- rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
- else:
- rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+ rc, out, err = module.run_command(pkg_info_path + [name_without_digits])
found = rc == 0
@@ -107,13 +105,13 @@ def query_package(module, name):
def matching_packages(module, name):
ports_glob_path = module.get_bin_path('ports_glob', True)
- rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ rc, out, err = module.run_command([ports_glob_path, name])
# counts the number of packages found
occurrences = out.count('\n')
if occurrences == 0:
name_without_digits = re.sub('[0-9]', '', name)
if name != name_without_digits:
- rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ rc, out, err = module.run_command([ports_glob_path, name_without_digits])
occurrences = out.count('\n')
return occurrences
@@ -135,10 +133,12 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
+ # TODO: convert run_command() argument to list!
rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
if query_package(module, package):
name_without_digits = re.sub('[0-9]', '', package)
+ # TODO: convert run_command() argument to list!
rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
shlex_quote(name_without_digits)),
use_unsafe_shell=True)
@@ -163,13 +163,13 @@ def install_packages(module, packages, use_packages):
if not portinstall_path:
pkg_path = module.get_bin_path('pkg', False)
if pkg_path:
- module.run_command("pkg install -y portupgrade")
+ module.run_command([pkg_path, "install", "-y", "portupgrade"])
portinstall_path = module.get_bin_path('portinstall', True)
if use_packages:
- portinstall_params = "--use-packages"
+ portinstall_params = ["--use-packages"]
else:
- portinstall_params = ""
+ portinstall_params = []
for package in packages:
if query_package(module, package):
@@ -178,7 +178,7 @@ def install_packages(module, packages, use_packages):
# TODO: check how many match
matches = matching_packages(module, package)
if matches == 1:
- rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ rc, out, err = module.run_command([portinstall_path, "--batch"] + portinstall_params + [package])
if not query_package(module, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
elif matches == 0:
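
The remaining use_unsafe_shell=True calls flagged with TODOs above still rely on backtick substitution. One possible way to split them into plain argv calls is sketched below, using subprocess as a stand-in for run_command; this is a suggestion, not the module's code:

    import subprocess

    def delete_matching(pkg_glob_path, pkg_delete_path, pattern):
        # First expand the glob with pkg_glob, then pass each result to pkg_delete.
        globbed = subprocess.run([pkg_glob_path, pattern],
                                 capture_output=True, text=True)
        names = globbed.stdout.split()
        if names:
            subprocess.run([pkg_delete_path] + names, check=False)
        return names
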
diff --git a/ansible_collections/community/general/plugins/modules/proxmox.py b/ansible_collections/community/general/plugins/modules/proxmox.py
index 47f3faa4f..73afd952e 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox.py
@@ -15,12 +15,14 @@ short_description: Management of instances in Proxmox VE cluster
description:
- Allows you to create/delete/stop instances in Proxmox VE cluster.
- The module automatically detects containerization type (lxc for PVE 4, openvz for older).
- - Since community.general 4.0.0 on, there are no more default values, see O(proxmox_default_behavior).
+ - Since community.general 4.0.0, there are no more default values.
attributes:
check_mode:
support: none
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
password:
description:
@@ -47,28 +49,23 @@ options:
comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
[,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
- See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(3).
- Should not be used in conjunction with O(storage).
type: str
cores:
description:
- Specify number of cores per socket.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
cpus:
description:
- numbers of allocated cpus for instance
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
memory:
description:
- memory size in MB for instance
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512).
type: int
swap:
description:
- swap memory size in MB for instance
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0).
type: int
netif:
description:
@@ -101,7 +98,6 @@ options:
onboot:
description:
- specifies whether a VM will be started during system bootup
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
storage:
description:
@@ -120,7 +116,6 @@ options:
cpuunits:
description:
- CPU weight for a VM
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000).
type: int
nameserver:
description:
@@ -200,25 +195,6 @@ options:
- The special value V(host) configures the same timezone used by Proxmox host.
type: str
version_added: '7.1.0'
- proxmox_default_behavior:
- description:
- - As of community.general 4.0.0, various options no longer have default values.
- These default values caused problems when users expected different behavior from Proxmox
- by default or filled options which caused problems when set.
- - The value V(compatibility) (default before community.general 4.0.0) will ensure that the default values
- are used when the values are not explicitly specified by the user. The new default is V(no_defaults),
- which makes sure these options have no defaults.
- - This affects the O(disk), O(cores), O(cpus), O(memory), O(onboot), O(swap), and O(cpuunits) options.
- - >
- This parameter is now B(deprecated) and it will be removed in community.general 10.0.0.
- By then, the module's behavior should be to not set default values, equivalent to V(no_defaults).
- If a consistent set of defaults is needed, the playbook or role should be responsible for setting it.
- type: str
- default: no_defaults
- choices:
- - compatibility
- - no_defaults
- version_added: "1.3.0"
clone:
description:
- ID of the container to be cloned.
@@ -242,6 +218,7 @@ author: Sergei Antipov (@UnderGreen)
seealso:
- module: community.general.proxmox_vm_info
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.proxmox.selection
- community.general.attributes
@@ -785,8 +762,6 @@ def main():
description=dict(type='str'),
hookscript=dict(type='str'),
timezone=dict(type='str'),
- proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults'],
- removed_in_version='9.0.0', removed_from_collection='community.general'),
clone=dict(type='int'),
clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
tags=dict(type='list', elements='str')
@@ -827,20 +802,6 @@ def main():
timeout = module.params['timeout']
clone = module.params['clone']
- if module.params['proxmox_default_behavior'] == 'compatibility':
- old_default_values = dict(
- disk="3",
- cores=1,
- cpus=1,
- memory=512,
- swap=0,
- onboot=False,
- cpuunits=1000,
- )
- for param, value in old_default_values.items():
- if module.params[param] is None:
- module.params[param] = value
-
# If vmid not set get the Next VM id from ProxmoxAPI
# If hostname is set get the VM id from ProxmoxAPI
if not vmid and state == 'present':
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_disk.py b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
index 69a7300df..83cdbeee5 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_disk.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
@@ -21,6 +21,8 @@ attributes:
support: none
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
name:
description:
@@ -325,6 +327,7 @@ options:
- The drive's worldwide name, encoded as 16 bytes hex string, prefixed by V(0x).
type: str
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
'''
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
index 7435695a9..f3ff212bf 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
@@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE domains
version_added: 1.3.0
description:
- Retrieve information about one or more Proxmox VE domains.
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
domain:
description:
@@ -24,6 +27,7 @@ options:
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
index 531a9dae7..eda1fe04d 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
@@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE groups
version_added: 1.3.0
description:
- Retrieve information about one or more Proxmox VE groups
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
group:
description:
@@ -24,6 +27,7 @@ options:
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
index 8779dcdc1..9fe805c7a 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
@@ -21,6 +21,8 @@ attributes:
support: none
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
archive:
description:
@@ -517,6 +519,16 @@ options:
default: '2.0'
type: dict
version_added: 7.1.0
+ usb:
+ description:
+ - A hash/dictionary of USB devices for the VM. O(usb='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(usb[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C(host="value|spice",mapping="value",usb3="1|0").
+ - host is either C(spice) or the USB id/port.
+ - Option C(mapping) is the mapped USB device name.
+ - Option C(usb3) enables USB 3 support.
+ type: dict
+ version_added: 9.0.0
update:
description:
- If V(true), the VM will be updated with new value.
@@ -579,6 +591,7 @@ options:
seealso:
- module: community.general.proxmox_vm_info
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.proxmox.selection
- community.general.attributes
@@ -1091,7 +1104,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
)
# Convert all dict in kwargs to elements.
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n], usb[n]
for k in list(kwargs.keys()):
if isinstance(kwargs[k], dict):
kwargs.update(kwargs[k])
@@ -1308,6 +1321,7 @@ def main():
storage=dict(type='str', required=True),
version=dict(type='str', choices=['2.0', '1.2'], default='2.0')
)),
+ usb=dict(type='dict'),
update=dict(type='bool', default=False),
update_unsafe=dict(type='bool', default=False),
vcpus=dict(type='int'),
@@ -1513,6 +1527,7 @@ def main():
tdf=module.params['tdf'],
template=module.params['template'],
tpmstate0=module.params['tpmstate0'],
+ usb=module.params['usb'],
vcpus=module.params['vcpus'],
vga=module.params['vga'],
virtio=module.params['virtio'],
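
The new usb option is a dict keyed by usb0, usb1, and so on; like the other indexed options it is flattened into top-level keyword arguments before being sent to the Proxmox API. A hedged sketch of that flattening step (hypothetical helper; the module's own loop updates kwargs in place):

    def flatten_indexed_options(kwargs):
        flat = dict(kwargs)
        for key in list(flat.keys()):
            if isinstance(flat[key], dict):
                # Merge e.g. {"usb0": "host=spice,usb3=1"} into the top level.
                flat.update(flat.pop(key))
        return flat

    print(flatten_indexed_options({"cores": 2,
                                   "usb": {"usb0": "host=spice,usb3=1"}}))
    # {'cores': 2, 'usb0': 'host=spice,usb3=1'}
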
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
index 9afe49447..6e94ed0bb 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_nic.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
@@ -21,6 +21,8 @@ attributes:
support: full
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
bridge:
description:
@@ -94,6 +96,7 @@ options:
- Specifies the instance ID.
type: int
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
'''
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_node_info.py b/ansible_collections/community/general/plugins/modules/proxmox_node_info.py
index 82ef7aa38..51d8745c0 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_node_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_node_info.py
@@ -17,7 +17,11 @@ version_added: 8.2.0
description:
- Retrieve information about one or more Proxmox VE nodes.
author: John Berninger (@jwbernin)
+attributes:
+ action_group:
+ version_added: 9.0.0
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_pool.py b/ansible_collections/community/general/plugins/modules/proxmox_pool.py
index 704632070..5089ec3be 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_pool.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_pool.py
@@ -21,6 +21,8 @@ attributes:
support: full
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
poolid:
description:
@@ -42,8 +44,9 @@ options:
type: str
extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
+ - community.general.proxmox.actiongroup_proxmox
+ - community.general.proxmox.documentation
+ - community.general.attributes
"""
EXAMPLES = """
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py b/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py
index 7d6b24949..b26082f97 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py
@@ -20,6 +20,8 @@ attributes:
support: full
diff_mode:
support: full
+ action_group:
+ version_added: 9.0.0
options:
poolid:
description:
@@ -48,8 +50,9 @@ options:
type: str
extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
+ - community.general.proxmox.actiongroup_proxmox
+ - community.general.proxmox.documentation
+ - community.general.attributes
"""
EXAMPLES = """
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
index 4991423c2..4f7b345b8 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_snap.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
@@ -21,6 +21,8 @@ attributes:
support: full
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
hostname:
description:
@@ -89,8 +91,9 @@ notes:
requirements: [ "proxmoxer", "requests" ]
author: Jeffrey van Pelt (@Thulium-Drake)
extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
+ - community.general.proxmox.actiongroup_proxmox
+ - community.general.proxmox.documentation
+ - community.general.attributes
'''
EXAMPLES = r'''
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py
index 498490fe4..b777870e5 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py
@@ -17,6 +17,9 @@ short_description: List content from a Proxmox VE storage
version_added: 8.2.0
description:
- Retrieves information about stored objects on a specific storage attached to a node.
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
storage:
description:
@@ -41,6 +44,7 @@ options:
type: int
author: Julian Vanden Broeck (@l00ptr)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
index 3c29e59cf..fd5a6ee0d 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
@@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE storages
version_added: 2.2.0
description:
- Retrieve information about one or more Proxmox VE storages.
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
storage:
description:
@@ -28,6 +31,7 @@ options:
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
index d31a04980..65a07566a 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
@@ -17,6 +17,9 @@ version_added: 3.8.0
description:
- Retrieve information about one or more Proxmox VE tasks.
author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
node:
description:
@@ -29,9 +32,10 @@ options:
aliases: ['upid', 'name']
type: str
extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.proxmox.actiongroup_proxmox
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
'''
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_template.py b/ansible_collections/community/general/plugins/modules/proxmox_template.py
index 615bfc182..f73109931 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_template.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_template.py
@@ -20,6 +20,8 @@ attributes:
support: none
diff_mode:
support: none
+ action_group:
+ version_added: 9.0.0
options:
node:
description:
@@ -69,6 +71,7 @@ notes:
- C(proxmoxer) >= 1.2.0 requires C(requests_toolbelt) to upload files larger than 256 MB.
author: Sergei Antipov (@UnderGreen)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
'''
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
index 20154528a..8680dec7c 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
@@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE users
version_added: 1.3.0
description:
- Retrieve information about one or more Proxmox VE users
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
domain:
description:
@@ -33,6 +36,7 @@ options:
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment:
+ - community.general.proxmox.actiongroup_proxmox
- community.general.proxmox.documentation
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py b/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py
index 30342b684..39d8307a4 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py
@@ -17,6 +17,9 @@ version_added: 7.2.0
description:
- Retrieve information about one or more Proxmox VE virtual machines.
author: 'Sergei Antipov (@UnderGreen) <greendayonfire at gmail dot com>'
+attributes:
+ action_group:
+ version_added: 9.0.0
options:
node:
description:
@@ -55,9 +58,10 @@ options:
default: none
version_added: 8.1.0
extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.proxmox.actiongroup_proxmox
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
"""
EXAMPLES = """
diff --git a/ansible_collections/community/general/plugins/modules/puppet.py b/ansible_collections/community/general/plugins/modules/puppet.py
index b28583fe0..073a08324 100644
--- a/ansible_collections/community/general/plugins/modules/puppet.py
+++ b/ansible_collections/community/general/plugins/modules/puppet.py
@@ -101,6 +101,12 @@ options:
- Whether to print a transaction summary.
type: bool
default: false
+ waitforlock:
+ description:
+ - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting.
+ - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes and V(h) for hours.
+ type: str
+ version_added: 9.0.0
verbose:
description:
- Print extra information.
@@ -159,6 +165,14 @@ EXAMPLES = r'''
skip_tags:
- service
+- name: Wait 30 seconds for any current puppet runs to finish
+ community.general.puppet:
+ waitforlock: 30
+
+- name: Wait 5 minutes for any current puppet runs to finish
+ community.general.puppet:
+ waitforlock: 5m
+
- name: Run puppet agent in noop mode
community.general.puppet:
noop: true
@@ -214,6 +228,7 @@ def main():
skip_tags=dict(type='list', elements='str'),
execute=dict(type='str'),
summarize=dict(type='bool', default=False),
+ waitforlock=dict(type='str'),
debug=dict(type='bool', default=False),
verbose=dict(type='bool', default=False),
use_srv_records=dict(type='bool'),
@@ -247,11 +262,11 @@ def main():
runner = puppet_utils.puppet_runner(module)
if not p['manifest'] and not p['execute']:
- args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records"
+ args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records waitforlock"
with runner(args_order) as ctx:
rc, stdout, stderr = ctx.run()
else:
- args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose"
+ args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose waitforlock"
with runner(args_order) as ctx:
rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
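
The new waitforlock option documented above also accepts an V(h) suffix for hours, which the added examples do not cover; an additional usage sketch with an illustrative value:

- name: Wait up to 2 hours for an already running puppet agent to finish
  community.general.puppet:
    waitforlock: 2h
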
diff --git a/ansible_collections/community/general/plugins/modules/rax.py b/ansible_collections/community/general/plugins/modules/rax.py
deleted file mode 100644
index 76e429944..000000000
--- a/ansible_collections/community/general/plugins/modules/rax.py
+++ /dev/null
@@ -1,903 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax
-short_description: Create / delete an instance in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud instance and optionally
- waits for it to be 'running'.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- auto_increment:
- description:
- - Whether or not to increment a single number with the name of the
- created servers. Only applicable when used with the O(group) attribute
- or meta key.
- type: bool
- default: true
- boot_from_volume:
- description:
- - Whether or not to boot the instance from a Cloud Block Storage volume.
- If V(true) and O(image) is specified a new volume will be created at
- boot time. O(boot_volume_size) is required with O(image) to create a
- new volume at boot time.
- type: bool
- default: false
- boot_volume:
- type: str
- description:
- - Cloud Block Storage ID or Name to use as the boot volume of the
- instance
- boot_volume_size:
- type: int
- description:
- - Size of the volume to create in Gigabytes. This is only required with
- O(image) and O(boot_from_volume).
- default: 100
- boot_volume_terminate:
- description:
- - Whether the O(boot_volume) or newly created volume from O(image) will
- be terminated when the server is terminated
- type: bool
- default: false
- config_drive:
- description:
- - Attach read-only configuration drive to server as label config-2
- type: bool
- default: false
- count:
- type: int
- description:
- - number of instances to launch
- default: 1
- count_offset:
- type: int
- description:
- - number count to start at
- default: 1
- disk_config:
- type: str
- description:
- - Disk partitioning strategy
- - If not specified it will assume the value V(auto).
- choices:
- - auto
- - manual
- exact_count:
- description:
- - Explicitly ensure an exact count of instances, used with
- state=active/present. If specified as V(true) and O(count) is less than
- the servers matched, servers will be deleted to match the count. If
- the number of matched servers is fewer than specified in O(count)
- additional servers will be added.
- type: bool
- default: false
- extra_client_args:
- type: dict
- default: {}
- description:
- - A hash of key/value pairs to be used when creating the cloudservers
- client. This is considered an advanced option, use it wisely and
- with caution.
- extra_create_args:
- type: dict
- default: {}
- description:
- - A hash of key/value pairs to be used when creating a new server.
- This is considered an advanced option, use it wisely and with caution.
- files:
- type: dict
- default: {}
- description:
- - Files to insert into the instance. remotefilename:localcontent
- flavor:
- type: str
- description:
- - flavor to use for the instance
- group:
- type: str
- description:
- - host group to assign to server, is also used for idempotent operations
- to ensure a specific number of instances
- image:
- type: str
- description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name).
- With O(boot_from_volume), a Cloud Block Storage volume will be created
- with this image
- instance_ids:
- type: list
- elements: str
- description:
- - list of instance ids, currently only used when state='absent' to
- remove instances
- key_name:
- type: str
- description:
- - key pair to use on the instance
- aliases:
- - keypair
- meta:
- type: dict
- default: {}
- description:
- - A hash of metadata to associate with the instance
- name:
- type: str
- description:
- - Name to give the instance
- networks:
- type: list
- elements: str
- description:
- - The network to attach to the instances. If specified, you must include
- ALL networks including the public and private interfaces. Can be C(id)
- or C(label).
- default:
- - public
- - private
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- user_data:
- type: str
- description:
- - Data to be uploaded to the servers config drive. This option implies
- O(config_drive). Can be a file path or a string
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Jesse Keating (@omgjlk)"
- - "Matt Martz (@sivel)"
-notes:
- - O(exact_count) can be "destructive" if the number of running servers in
- the O(group) is larger than that specified in O(count). In such a case, the
- O(state) is effectively set to V(absent) and the extra servers are deleted.
- In the case of deletion, the returned data structure will have RV(ignore:action)
- set to V(delete), and the oldest servers in the group will be deleted.
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a Cloud Server
- gather_facts: false
- tasks:
- - name: Server build request
- local_action:
- module: rax
- credentials: ~/.raxpub
- name: rax-test1
- flavor: 5
- image: b11d9567-e412-4255-96b9-bd63ab23bcfe
- key_name: my_rackspace_key
- files:
- /root/test.txt: /home/localuser/test.txt
- wait: true
- state: present
- networks:
- - private
- - public
- register: rax
-
-- name: Build an exact count of cloud servers with incremented names
- hosts: local
- gather_facts: false
- tasks:
- - name: Server build requests
- local_action:
- module: rax
- credentials: ~/.raxpub
- name: test%03d.example.org
- flavor: performance1-1
- image: ubuntu-1204-lts-precise-pangolin
- state: present
- count: 10
- count_offset: 10
- exact_count: true
- group: test
- wait: true
- register: rax
-'''
-
-import json
-import os
-import re
-import time
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
- rax_find_image, rax_find_network, rax_find_volume,
- rax_required_together, rax_to_dict, setup_rax_module)
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.six import string_types
-
-
-def rax_find_server_image(module, server, image, boot_volume):
- if not image and boot_volume:
- vol = rax_find_bootable_volume(module, pyrax, server,
- exit=False)
- if not vol:
- return None
- volume_image_metadata = vol.volume_image_metadata
- vol_image_id = volume_image_metadata.get('image_id')
- if vol_image_id:
- server_image = rax_find_image(module, pyrax,
- vol_image_id, exit=False)
- if server_image:
- server.image = dict(id=server_image)
-
- # Match image IDs taking care of boot from volume
- if image and not server.image:
- vol = rax_find_bootable_volume(module, pyrax, server)
- volume_image_metadata = vol.volume_image_metadata
- vol_image_id = volume_image_metadata.get('image_id')
- if not vol_image_id:
- return None
- server_image = rax_find_image(module, pyrax,
- vol_image_id, exit=False)
- if image != server_image:
- return None
-
- server.image = dict(id=server_image)
- elif image and server.image['id'] != image:
- return None
-
- return server.image
-
-
-def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
- files=None, wait=True, wait_timeout=300, disk_config=None,
- group=None, nics=None, extra_create_args=None, user_data=None,
- config_drive=False, existing=None, block_device_mapping_v2=None):
- names = [] if names is None else names
- meta = {} if meta is None else meta
- files = {} if files is None else files
- nics = [] if nics is None else nics
- extra_create_args = {} if extra_create_args is None else extra_create_args
- existing = [] if existing is None else existing
- block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
-
- cs = pyrax.cloudservers
- changed = False
-
- if user_data:
- config_drive = True
-
- if user_data and os.path.isfile(os.path.expanduser(user_data)):
- try:
- user_data = os.path.expanduser(user_data)
- f = open(user_data)
- user_data = f.read()
- f.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % user_data)
-
- # Handle the file contents
- for rpath in files.keys():
- lpath = os.path.expanduser(files[rpath])
- try:
- fileobj = open(lpath, 'r')
- files[rpath] = fileobj.read()
- fileobj.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % lpath)
- try:
- servers = []
- bdmv2 = block_device_mapping_v2
- for name in names:
- servers.append(cs.servers.create(name=name, image=image,
- flavor=flavor, meta=meta,
- key_name=key_name,
- files=files, nics=nics,
- disk_config=disk_config,
- config_drive=config_drive,
- userdata=user_data,
- block_device_mapping_v2=bdmv2,
- **extra_create_args))
- except Exception as e:
- if e.message:
- msg = str(e.message)
- else:
- msg = repr(e)
- module.fail_json(msg=msg)
- else:
- changed = True
-
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- for server in servers:
- try:
- server.get()
- except Exception:
- server.status = 'ERROR'
-
- if not filter(lambda s: s.status not in FINAL_STATUSES,
- servers):
- break
- time.sleep(5)
-
- success = []
- error = []
- timeout = []
- for server in servers:
- try:
- server.get()
- except Exception:
- server.status = 'ERROR'
- instance = rax_to_dict(server, 'server')
- if server.status == 'ACTIVE' or not wait:
- success.append(instance)
- elif server.status == 'ERROR':
- error.append(instance)
- elif wait:
- timeout.append(instance)
-
- untouched = [rax_to_dict(s, 'server') for s in existing]
- instances = success + untouched
-
- results = {
- 'changed': changed,
- 'action': 'create',
- 'instances': instances,
- 'success': success,
- 'error': error,
- 'timeout': timeout,
- 'instance_ids': {
- 'instances': [i['id'] for i in instances],
- 'success': [i['id'] for i in success],
- 'error': [i['id'] for i in error],
- 'timeout': [i['id'] for i in timeout]
- }
- }
-
- if timeout:
- results['msg'] = 'Timeout waiting for all servers to build'
- elif error:
- results['msg'] = 'Failed to build all servers'
-
- if 'msg' in results:
- module.fail_json(**results)
- else:
- module.exit_json(**results)
-
-
-def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
- instance_ids = [] if instance_ids is None else instance_ids
- kept = [] if kept is None else kept
-
- cs = pyrax.cloudservers
-
- changed = False
- instances = {}
- servers = []
-
- for instance_id in instance_ids:
- servers.append(cs.servers.get(instance_id))
-
- for server in servers:
- try:
- server.delete()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- changed = True
-
- instance = rax_to_dict(server, 'server')
- instances[instance['id']] = instance
-
- # If requested, wait for server deletion
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- for server in servers:
- instance_id = server.id
- try:
- server.get()
- except Exception:
- instances[instance_id]['status'] = 'DELETED'
- instances[instance_id]['rax_status'] = 'DELETED'
-
- if not filter(lambda s: s['status'] not in ('', 'DELETED',
- 'ERROR'),
- instances.values()):
- break
-
- time.sleep(5)
-
- timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
- instances.values())
- error = filter(lambda s: s['status'] in ('ERROR'),
- instances.values())
- success = filter(lambda s: s['status'] in ('', 'DELETED'),
- instances.values())
-
- instances = [rax_to_dict(s, 'server') for s in kept]
-
- results = {
- 'changed': changed,
- 'action': 'delete',
- 'instances': instances,
- 'success': success,
- 'error': error,
- 'timeout': timeout,
- 'instance_ids': {
- 'instances': [i['id'] for i in instances],
- 'success': [i['id'] for i in success],
- 'error': [i['id'] for i in error],
- 'timeout': [i['id'] for i in timeout]
- }
- }
-
- if timeout:
- results['msg'] = 'Timeout waiting for all servers to delete'
- elif error:
- results['msg'] = 'Failed to delete all servers'
-
- if 'msg' in results:
- module.fail_json(**results)
- else:
- module.exit_json(**results)
-
-
-def cloudservers(module, state=None, name=None, flavor=None, image=None,
- meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
- disk_config=None, count=1, group=None, instance_ids=None,
- exact_count=False, networks=None, count_offset=0,
- auto_increment=False, extra_create_args=None, user_data=None,
- config_drive=False, boot_from_volume=False,
- boot_volume=None, boot_volume_size=None,
- boot_volume_terminate=False):
- meta = {} if meta is None else meta
- files = {} if files is None else files
- instance_ids = [] if instance_ids is None else instance_ids
- networks = [] if networks is None else networks
- extra_create_args = {} if extra_create_args is None else extra_create_args
-
- cs = pyrax.cloudservers
- cnw = pyrax.cloud_networks
- if not cnw:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present' or (state == 'absent' and instance_ids is None):
- if not boot_from_volume and not boot_volume and not image:
- module.fail_json(msg='image is required for the "rax" module')
-
- for arg, value in dict(name=name, flavor=flavor).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax" module' %
- arg)
-
- if boot_from_volume and not image and not boot_volume:
- module.fail_json(msg='image or boot_volume are required for the '
- '"rax" with boot_from_volume')
-
- if boot_from_volume and image and not boot_volume_size:
- module.fail_json(msg='boot_volume_size is required for the "rax" '
- 'module with boot_from_volume and image')
-
- if boot_from_volume and image and boot_volume:
- image = None
-
- servers = []
-
- # Add the group meta key
- if group and 'group' not in meta:
- meta['group'] = group
- elif 'group' in meta and group is None:
- group = meta['group']
-
- # Normalize and ensure all metadata values are strings
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- # When using state=absent with group, the absent block won't match the
- # names properly. Use the exact_count functionality to decrease the count
- # to the desired level
- was_absent = False
- if group is not None and state == 'absent':
- exact_count = True
- state = 'present'
- was_absent = True
-
- if image:
- image = rax_find_image(module, pyrax, image)
-
- nics = []
- if networks:
- for network in networks:
- nics.extend(rax_find_network(module, pyrax, network))
-
- # act on the state
- if state == 'present':
- # Idempotent ensurance of a specific count of servers
- if exact_count is not False:
- # See if we can find servers that match our options
- if group is None:
- module.fail_json(msg='"group" must be provided when using '
- '"exact_count"')
-
- if auto_increment:
- numbers = set()
-
- # See if the name is a printf like string, if not append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message)
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset, count_offset + count)
- available_numbers = list(set(number_range)
- .difference(numbers))
- else: # Not auto incrementing
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- # available_numbers not needed here, we inspect auto_increment
- # again later
-
- # If state was absent but the count was changed,
- # assume we only wanted to remove that number of instances
- if was_absent:
- diff = len(servers) - count
- if diff < 0:
- count = 0
- else:
- count = diff
-
- if len(servers) > count:
- # We have more servers than we need, set state='absent'
- # and delete the extras, this should delete the oldest
- state = 'absent'
- kept = servers[:count]
- del servers[:count]
- instance_ids = []
- for server in servers:
- instance_ids.append(server.id)
- delete(module, instance_ids=instance_ids, wait=wait,
- wait_timeout=wait_timeout, kept=kept)
- elif len(servers) < count:
- # we have fewer servers than we need
- if auto_increment:
- # auto incrementing server numbers
- names = []
- name_slice = count - len(servers)
- numbers_to_use = available_numbers[:name_slice]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # We are not auto incrementing server numbers,
- # create a list of 'name' that matches how many we need
- names = [name] * (count - len(servers))
- else:
- # we have the right number of servers, just return info
- # about all of the matched servers
- instances = []
- instance_ids = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
- instance_ids.append(server.id)
- module.exit_json(changed=False, action=None,
- instances=instances,
- success=[], error=[], timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
- else: # not called with exact_count=True
- if group is not None:
- if auto_increment:
- # we are auto incrementing server numbers, but not with
- # exact_count
- numbers = set()
-
- # See if the name is a printf like string, if not append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message)
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset,
- count_offset + count + len(numbers))
- available_numbers = list(set(number_range)
- .difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # Not auto incrementing
- names = [name] * count
- else:
- # No group was specified, and not using exact_count
- # Perform more simplistic matching
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- servers = []
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non matching metadata
- if server.metadata != meta:
- continue
- servers.append(server)
-
- if len(servers) >= count:
- # We have more servers than were requested, don't do
- # anything. Not running with exact_count=True, so we assume
- # more is OK
- instances = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
-
- instance_ids = [i['id'] for i in instances]
- module.exit_json(changed=False, action=None,
- instances=instances, success=[], error=[],
- timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
-
- # We need more servers to reach out target, create names for
- # them, we aren't performing auto_increment here
- names = [name] * (count - len(servers))
-
- block_device_mapping_v2 = []
- if boot_from_volume:
- mapping = {
- 'boot_index': '0',
- 'delete_on_termination': boot_volume_terminate,
- 'destination_type': 'volume',
- }
- if image:
- mapping.update({
- 'uuid': image,
- 'source_type': 'image',
- 'volume_size': boot_volume_size,
- })
- image = None
- elif boot_volume:
- volume = rax_find_volume(module, pyrax, boot_volume)
- mapping.update({
- 'uuid': pyrax.utils.get_id(volume),
- 'source_type': 'volume',
- })
- block_device_mapping_v2.append(mapping)
-
- create(module, names=names, flavor=flavor, image=image,
- meta=meta, key_name=key_name, files=files, wait=wait,
- wait_timeout=wait_timeout, disk_config=disk_config, group=group,
- nics=nics, extra_create_args=extra_create_args,
- user_data=user_data, config_drive=config_drive,
- existing=servers,
- block_device_mapping_v2=block_device_mapping_v2)
-
- elif state == 'absent':
- if instance_ids is None:
- # We weren't given an explicit list of server IDs to delete
- # Let's match instead
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non matching metadata
- if meta != server.metadata:
- continue
-
- servers.append(server)
-
- # Build a list of server IDs to delete
- instance_ids = []
- for server in servers:
- if len(instance_ids) < count:
- instance_ids.append(server.id)
- else:
- break
-
- if not instance_ids:
- # No server IDs were matched for deletion, or no IDs were
- # explicitly provided, just exit and don't do anything
- module.exit_json(changed=False, action=None, instances=[],
- success=[], error=[], timeout=[],
- instance_ids={'instances': [],
- 'success': [], 'error': [],
- 'timeout': []})
-
- delete(module, instance_ids=instance_ids, wait=wait,
- wait_timeout=wait_timeout)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- auto_increment=dict(default=True, type='bool'),
- boot_from_volume=dict(default=False, type='bool'),
- boot_volume=dict(type='str'),
- boot_volume_size=dict(type='int', default=100),
- boot_volume_terminate=dict(type='bool', default=False),
- config_drive=dict(default=False, type='bool'),
- count=dict(default=1, type='int'),
- count_offset=dict(default=1, type='int'),
- disk_config=dict(choices=['auto', 'manual']),
- exact_count=dict(default=False, type='bool'),
- extra_client_args=dict(type='dict', default={}),
- extra_create_args=dict(type='dict', default={}),
- files=dict(type='dict', default={}),
- flavor=dict(),
- group=dict(),
- image=dict(),
- instance_ids=dict(type='list', elements='str'),
- key_name=dict(aliases=['keypair']),
- meta=dict(type='dict', default={}),
- name=dict(),
- networks=dict(type='list', elements='str', default=['public', 'private']),
- state=dict(default='present', choices=['present', 'absent']),
- user_data=dict(no_log=True),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=300, type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- auto_increment = module.params.get('auto_increment')
- boot_from_volume = module.params.get('boot_from_volume')
- boot_volume = module.params.get('boot_volume')
- boot_volume_size = module.params.get('boot_volume_size')
- boot_volume_terminate = module.params.get('boot_volume_terminate')
- config_drive = module.params.get('config_drive')
- count = module.params.get('count')
- count_offset = module.params.get('count_offset')
- disk_config = module.params.get('disk_config')
- if disk_config:
- disk_config = disk_config.upper()
- exact_count = module.params.get('exact_count', False)
- extra_client_args = module.params.get('extra_client_args')
- extra_create_args = module.params.get('extra_create_args')
- files = module.params.get('files')
- flavor = module.params.get('flavor')
- group = module.params.get('group')
- image = module.params.get('image')
- instance_ids = module.params.get('instance_ids')
- key_name = module.params.get('key_name')
- meta = module.params.get('meta')
- name = module.params.get('name')
- networks = module.params.get('networks')
- state = module.params.get('state')
- user_data = module.params.get('user_data')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- setup_rax_module(module, pyrax)
-
- if extra_client_args:
- pyrax.cloudservers = pyrax.connect_to_cloudservers(
- region=pyrax.cloudservers.client.region_name,
- **extra_client_args)
- client = pyrax.cloudservers.client
- if 'bypass_url' in extra_client_args:
- client.management_url = extra_client_args['bypass_url']
-
- if pyrax.cloudservers is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- cloudservers(module, state=state, name=name, flavor=flavor,
- image=image, meta=meta, key_name=key_name, files=files,
- wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
- count=count, group=group, instance_ids=instance_ids,
- exact_count=exact_count, networks=networks,
- count_offset=count_offset, auto_increment=auto_increment,
- extra_create_args=extra_create_args, user_data=user_data,
- config_drive=config_drive, boot_from_volume=boot_from_volume,
- boot_volume=boot_volume, boot_volume_size=boot_volume_size,
- boot_volume_terminate=boot_volume_terminate)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs.py b/ansible_collections/community/general/plugins/modules/rax_cbs.py
deleted file mode 100644
index 77e7cebad..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_cbs.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cbs
-short_description: Manipulate Rackspace Cloud Block Storage Volumes
-description:
- - Manipulate Rackspace Cloud Block Storage Volumes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- description:
- type: str
- description:
- - Description to give the volume being created.
- image:
- type: str
- description:
- - Image to use for bootable volumes. Can be an C(id), C(human_id) or
- C(name). This option requires C(pyrax>=1.9.3).
- meta:
- type: dict
- default: {}
- description:
- - A hash of metadata to associate with the volume.
- name:
- type: str
- description:
- - Name to give the volume being created.
- required: true
- size:
- type: int
- description:
- - Size of the volume to create in Gigabytes.
- default: 100
- snapshot_id:
- type: str
- description:
- - The id of the snapshot to create the volume from.
- state:
- type: str
- description:
- - Indicate desired state of the resource.
- choices:
- - present
- - absent
- default: present
- volume_type:
- type: str
- description:
- - Type of the volume being created.
- choices:
- - SATA
- - SSD
- default: SATA
- wait:
- description:
- - Wait for the volume to be in state C(available) before returning.
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds.
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a Block Storage Volume
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Storage volume create request
- local_action:
- module: rax_cbs
- credentials: ~/.raxpub
- name: my-volume
- description: My Volume
- volume_type: SSD
- size: 150
- region: DFW
- wait: true
- state: present
- meta:
- app: my-cool-app
- register: my_volume
-'''
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
- rax_required_together, rax_to_dict, setup_rax_module)
-
-
-def cloud_block_storage(module, state, name, description, meta, size,
- snapshot_id, volume_type, wait, wait_timeout,
- image):
- changed = False
- volume = None
- instance = {}
-
- cbs = pyrax.cloud_blockstorage
-
- if cbs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if image:
- # pyrax<1.9.3 did not have support for specifying an image when
- # creating a volume which is required for bootable volumes
- if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
- module.fail_json(msg='Creating a bootable volume requires '
- 'pyrax>=1.9.3')
- image = rax_find_image(module, pyrax, image)
-
- volume = rax_find_volume(module, pyrax, name)
-
- if state == 'present':
- if not volume:
- kwargs = dict()
- if image:
- kwargs['image'] = image
- try:
- volume = cbs.create(name, size=size, volume_type=volume_type,
- description=description,
- metadata=meta,
- snapshot_id=snapshot_id, **kwargs)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_for_build(volume, interval=5,
- attempts=attempts)
-
- volume.get()
- instance = rax_to_dict(volume)
-
- result = dict(changed=changed, volume=instance)
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
- elif wait and volume.status not in VOLUME_STATUS:
- result['msg'] = 'Timeout waiting on %s' % volume.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- if volume:
- instance = rax_to_dict(volume)
- try:
- volume.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, volume=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- description=dict(type='str'),
- image=dict(type='str'),
- meta=dict(type='dict', default={}),
- name=dict(required=True),
- size=dict(type='int', default=100),
- snapshot_id=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- description = module.params.get('description')
- image = module.params.get('image')
- meta = module.params.get('meta')
- name = module.params.get('name')
- size = module.params.get('size')
- snapshot_id = module.params.get('snapshot_id')
- state = module.params.get('state')
- volume_type = module.params.get('volume_type')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_block_storage(module, state, name, description, meta, size,
- snapshot_id, volume_type, wait, wait_timeout,
- image)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
deleted file mode 100644
index 00b860a90..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cbs_attachments
-short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
-description:
- - Manipulate Rackspace Cloud Block Storage Volume Attachments
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- device:
- type: str
- description:
- - The device path to attach the volume to, e.g. /dev/xvde.
- - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name.
- volume:
- type: str
- description:
- - Name or id of the volume to attach/detach
- required: true
- server:
- type: str
- description:
- - Name or id of the server to attach/detach
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- wait:
- description:
- - wait for the volume to be in 'in-use'/'available' state before returning
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Attach a Block Storage Volume
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Storage volume attach request
- local_action:
- module: rax_cbs_attachments
- credentials: ~/.raxpub
- volume: my-volume
- server: my-server
- device: /dev/xvdd
- region: DFW
- wait: true
- state: present
- register: my_volume
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
- rax_argument_spec,
- rax_find_server,
- rax_find_volume,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_block_storage_attachments(module, state, volume, server, device,
- wait, wait_timeout):
- cbs = pyrax.cloud_blockstorage
- cs = pyrax.cloudservers
-
- if cbs is None or cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- changed = False
- instance = {}
-
- volume = rax_find_volume(module, pyrax, volume)
-
- if not volume:
- module.fail_json(msg='No matching storage volumes were found')
-
- if state == 'present':
- server = rax_find_server(module, pyrax, server)
-
- if (volume.attachments and
- volume.attachments[0]['server_id'] == server.id):
- changed = False
- elif volume.attachments:
- module.fail_json(msg='Volume is attached to another server')
- else:
- try:
- volume.attach_to_instance(server, mountpoint=device)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- volume.get()
-
- for key, value in vars(volume).items():
- if (isinstance(value, NON_CALLABLES) and
- not key.startswith('_')):
- instance[key] = value
-
- result = dict(changed=changed)
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
- elif wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_until(volume, 'status', 'in-use',
- interval=5, attempts=attempts)
-
- volume.get()
- result['volume'] = rax_to_dict(volume)
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- server = rax_find_server(module, pyrax, server)
-
- if (volume.attachments and
- volume.attachments[0]['server_id'] == server.id):
- try:
- volume.detach()
- if wait:
- pyrax.utils.wait_until(volume, 'status', 'available',
- interval=3, attempts=0,
- verbose=False)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- volume.get()
- changed = True
- elif volume.attachments:
- module.fail_json(msg='Volume is attached to another server')
-
- result = dict(changed=changed, volume=rax_to_dict(volume))
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- module.exit_json(changed=changed, volume=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- device=dict(required=False),
- volume=dict(required=True),
- server=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- device = module.params.get('device')
- volume = module.params.get('volume')
- server = module.params.get('server')
- state = module.params.get('state')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_block_storage_attachments(module, state, volume, server, device,
- wait, wait_timeout)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb.py b/ansible_collections/community/general/plugins/modules/rax_cdb.py
deleted file mode 100644
index 9538579fa..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_cdb.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cdb
-short_description: Create/delete or resize a Rackspace Cloud Databases instance
-description:
- - creates / deletes or resize a Rackspace Cloud Databases instance
- and optionally waits for it to be 'running'. The name option needs to be
- unique since it's used to identify the instance.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- name:
- type: str
- description:
- - Name of the databases server instance
- required: true
- flavor:
- type: int
- description:
- - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
- default: 1
- volume:
- type: int
- description:
- - Volume size of the database 1-150GB
- default: 2
- cdb_type:
- type: str
- description:
- - type of instance (i.e. MySQL, MariaDB, Percona)
- default: MySQL
- aliases: ['type']
- cdb_version:
- type: str
- description:
- - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
- - "The available choices are: V(5.1), V(5.6) and V(10)."
- default: '5.6'
- aliases: ['version']
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a Cloud Databases
- gather_facts: false
- tasks:
- - name: Server build request
- local_action:
- module: rax_cdb
- credentials: ~/.raxpub
- region: IAD
- name: db-server1
- flavor: 1
- volume: 2
- cdb_type: MySQL
- cdb_version: 5.6
- wait: true
- state: present
- register: rax_db_server
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_instance(name):
-
- cdb = pyrax.cloud_databases
- instances = cdb.list()
- if instances:
- for instance in instances:
- if instance.name == name:
- return instance
- return False
-
-
-def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout):
-
- for arg, value in dict(name=name, flavor=flavor,
- volume=volume, type=cdb_type, version=cdb_version
- ).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb"'
- ' module' % arg)
-
- if not (volume >= 1 and volume <= 150):
- module.fail_json(msg='volume is required to be between 1 and 150')
-
- cdb = pyrax.cloud_databases
-
- flavors = []
- for item in cdb.list_flavors():
- flavors.append(item.id)
-
- if not (flavor in flavors):
- module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor))
-
- changed = False
-
- instance = find_instance(name)
-
- if not instance:
- action = 'create'
- try:
- instance = cdb.create(name=name, flavor=flavor, volume=volume,
- type=cdb_type, version=cdb_version)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- else:
- action = None
-
- if instance.volume.size != volume:
- action = 'resize'
- if instance.volume.size > volume:
- module.fail_json(changed=False, action=action,
- msg='The new volume size must be larger than '
- 'the current volume size',
- cdb=rax_to_dict(instance))
- instance.resize_volume(volume)
- changed = True
-
- if int(instance.flavor.id) != flavor:
- action = 'resize'
- pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
- attempts=wait_timeout)
- instance.resize(flavor)
- changed = True
-
- if wait:
- pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
- attempts=wait_timeout)
-
- if wait and instance.status != 'ACTIVE':
- module.fail_json(changed=changed, action=action,
- cdb=rax_to_dict(instance),
- msg='Timeout waiting for "%s" databases instance to '
- 'be created' % name)
-
- module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
-
-
-def delete_instance(module, name, wait, wait_timeout):
-
- if not name:
- module.fail_json(msg='name is required for the "rax_cdb" module')
-
- changed = False
-
- instance = find_instance(name)
- if not instance:
- module.exit_json(changed=False, action='delete')
-
- try:
- instance.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- if wait:
- pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
- attempts=wait_timeout)
-
- if wait and instance.status != 'SHUTDOWN':
- module.fail_json(changed=changed, action='delete',
- cdb=rax_to_dict(instance),
- msg='Timeout waiting for "%s" databases instance to '
- 'be deleted' % name)
-
- module.exit_json(changed=changed, action='delete',
- cdb=rax_to_dict(instance))
-
-
-def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout):
-
- # act on the state
- if state == 'present':
- save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout)
- elif state == 'absent':
- delete_instance(module, name, wait, wait_timeout)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(type='str', required=True),
- flavor=dict(type='int', default=1),
- volume=dict(type='int', default=2),
- cdb_type=dict(type='str', default='MySQL', aliases=['type']),
- cdb_version=dict(type='str', default='5.6', aliases=['version']),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- flavor = module.params.get('flavor')
- volume = module.params.get('volume')
- cdb_type = module.params.get('cdb_type')
- cdb_version = module.params.get('cdb_version')
- state = module.params.get('state')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
- rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
deleted file mode 100644
index b0db11814..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: rax_cdb_database
-short_description: Create / delete a database in the Cloud Databases
-description:
- - create / delete a database in the Cloud Databases.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- cdb_id:
- type: str
- description:
- - The databases server UUID
- required: true
- name:
- type: str
- description:
- - Name to give to the database
- required: true
- character_set:
- type: str
- description:
- - Set of symbols and encodings
- default: 'utf8'
- collate:
- type: str
- description:
- - Set of rules for comparing characters in a character set
- default: 'utf8_general_ci'
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a database in Cloud Databases
- tasks:
- - name: Database build request
- local_action:
- module: rax_cdb_database
- credentials: ~/.raxpub
- region: IAD
- cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
- name: db1
- state: present
- register: rax_db_database
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_database(instance, name):
- try:
- database = instance.get_database(name)
- except Exception:
- return False
-
- return database
-
-
-def save_database(module, cdb_id, name, character_set, collate):
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- database = find_database(instance, name)
-
- if not database:
- try:
- database = instance.create_database(name=name,
- character_set=character_set,
- collate=collate)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='create',
- database=rax_to_dict(database))
-
-
-def delete_database(module, cdb_id, name):
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- database = find_database(instance, name)
-
- if database:
- try:
- database.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='delete',
- database=rax_to_dict(database))
-
-
-def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
-
- # act on the state
- if state == 'present':
- save_database(module, cdb_id, name, character_set, collate)
- elif state == 'absent':
- delete_database(module, cdb_id, name)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- cdb_id=dict(type='str', required=True),
- name=dict(type='str', required=True),
- character_set=dict(type='str', default='utf8'),
- collate=dict(type='str', default='utf8_general_ci'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- cdb_id = module.params.get('cdb_id')
- name = module.params.get('name')
- character_set = module.params.get('character_set')
- collate = module.params.get('collate')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
- rax_cdb_database(module, state, cdb_id, name, character_set, collate)
-
-
-if __name__ == '__main__':
- main()
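The save_database()/delete_database() pair above is a find-then-act idempotency check: look the database up on the instance first and only create or delete when the lookup says so. A self-contained sketch of that check follows; FakeInstance stands in for the object returned by pyrax.cloud_databases.get(cdb_id), and only the method names (get_database/create_database) are taken from the module.

```python
# Sketch of the find-then-create check in the removed rax_cdb_database module.
# FakeInstance stands in for the object returned by pyrax.cloud_databases.get();
# only the method names (get_database/create_database) mirror the module.

class FakeInstance(object):
    def __init__(self):
        self._dbs = {}

    def get_database(self, name):
        if name not in self._dbs:
            raise KeyError(name)      # pyrax also signals a miss via an exception
        return self._dbs[name]

    def create_database(self, name, character_set='utf8', collate='utf8_general_ci'):
        self._dbs[name] = {'name': name, 'character_set': character_set, 'collate': collate}
        return self._dbs[name]


def ensure_database(instance, name, character_set='utf8', collate='utf8_general_ci'):
    """Return (database, changed); create the database only when it is missing."""
    try:
        return instance.get_database(name), False
    except Exception:
        return instance.create_database(name, character_set, collate), True


if __name__ == '__main__':
    inst = FakeInstance()
    print(ensure_database(inst, 'db1'))   # created -> changed is True
    print(ensure_database(inst, 'db1'))   # already present -> changed is False
```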
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
deleted file mode 100644
index 6ee86c4fe..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cdb_user
-short_description: Create / delete a Rackspace Cloud Database user
-description:
- - create / delete a user in the Cloud Databases.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- cdb_id:
- type: str
- description:
- - The database server UUID
- required: true
- db_username:
- type: str
- description:
- - Name of the database user
- required: true
- db_password:
- type: str
- description:
- - Database user password
- required: true
- databases:
- type: list
- elements: str
- description:
- - Name of the databases that the user can access
- default: []
- host:
- type: str
- description:
- - Specifies the host from which a user is allowed to connect to
- the database. Possible values are a string containing an IPv4 address
- or "%" to allow connecting from any host
- default: '%'
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a user in Cloud Databases
- tasks:
- - name: User build request
- local_action:
- module: rax_cdb_user
- credentials: ~/.raxpub
- region: IAD
- cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
- db_username: user1
- db_password: user1
- databases: ['db1']
- state: present
- register: rax_db_user
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_user(instance, name):
- try:
- user = instance.get_user(name)
- except Exception:
- return False
-
- return user
-
-
-def save_user(module, cdb_id, name, password, databases, host):
-
- for arg, value in dict(cdb_id=cdb_id, name=name).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb_user" '
- 'module' % arg)
-
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- user = find_user(instance, name)
-
- if not user:
- action = 'create'
- try:
- user = instance.create_user(name=name,
- password=password,
- database_names=databases,
- host=host)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
- else:
- action = 'update'
-
- if user.host != host:
- changed = True
-
- user.update(password=password, host=host)
-
- former_dbs = set([item.name for item in user.list_user_access()])
- databases = set(databases)
-
- if databases != former_dbs:
- try:
- revoke_dbs = [db for db in former_dbs if db not in databases]
- user.revoke_user_access(db_names=revoke_dbs)
-
- new_dbs = [db for db in databases if db not in former_dbs]
- user.grant_user_access(db_names=new_dbs)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
-
-
-def delete_user(module, cdb_id, name):
-
- for arg, value in dict(cdb_id=cdb_id, name=name).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb_user"'
- ' module' % arg)
-
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- user = find_user(instance, name)
-
- if user:
- try:
- user.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='delete')
-
-
-def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
-
- # act on the state
- if state == 'present':
- save_user(module, cdb_id, name, password, databases, host)
- elif state == 'absent':
- delete_user(module, cdb_id, name)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- cdb_id=dict(type='str', required=True),
- db_username=dict(type='str', required=True),
- db_password=dict(type='str', required=True, no_log=True),
- databases=dict(type='list', elements='str', default=[]),
- host=dict(type='str', default='%'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- cdb_id = module.params.get('cdb_id')
- name = module.params.get('db_username')
- password = module.params.get('db_password')
- databases = module.params.get('databases')
- host = to_text(module.params.get('host'), errors='surrogate_or_strict')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
- rax_cdb_user(module, state, cdb_id, name, password, databases, host)
-
-
-if __name__ == '__main__':
- main()
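save_user() reconciles the user's database grants by comparing the databases the user can currently reach with the requested list and deriving the revoke and grant sets. A small sketch of that reconciliation is below; in the module the two resulting lists feed user.revoke_user_access() and user.grant_user_access().

```python
# Sketch of the grant/revoke reconciliation in the removed rax_cdb_user module:
# the two derived lists are what the module passes to user.revoke_user_access()
# and user.grant_user_access().

def reconcile_access(current, desired):
    """Return (to_revoke, to_grant) as sorted lists."""
    current = set(current)
    desired = set(desired)
    to_revoke = sorted(current - desired)   # accessible now, no longer wanted
    to_grant = sorted(desired - current)    # wanted, not yet granted
    return to_revoke, to_grant


if __name__ == '__main__':
    revoke, grant = reconcile_access(['db1', 'db2'], ['db2', 'db3'])
    print('revoke:', revoke)                # ['db1']
    print('grant:', grant)                  # ['db3']
    print('changed:', bool(revoke or grant))
```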
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb.py b/ansible_collections/community/general/plugins/modules/rax_clb.py
deleted file mode 100644
index 23c795f39..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_clb.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_clb
-short_description: Create / delete a load balancer in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud load balancer.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- algorithm:
- type: str
- description:
- - algorithm for the balancer being created
- choices:
- - RANDOM
- - LEAST_CONNECTIONS
- - ROUND_ROBIN
- - WEIGHTED_LEAST_CONNECTIONS
- - WEIGHTED_ROUND_ROBIN
- default: LEAST_CONNECTIONS
- meta:
- type: dict
- default: {}
- description:
- - A hash of metadata to associate with the instance
- name:
- type: str
- description:
- - Name to give the load balancer
- required: true
- port:
- type: int
- description:
- - Port for the balancer being created
- default: 80
- protocol:
- type: str
- description:
- - Protocol for the balancer being created
- choices:
- - DNS_TCP
- - DNS_UDP
- - FTP
- - HTTP
- - HTTPS
- - IMAPS
- - IMAPv4
- - LDAP
- - LDAPS
- - MYSQL
- - POP3
- - POP3S
- - SMTP
- - TCP
- - TCP_CLIENT_FIRST
- - UDP
- - UDP_STREAM
- - SFTP
- default: HTTP
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- timeout:
- type: int
- description:
- - timeout for communication between the balancer and the node
- default: 30
- type:
- type: str
- description:
- - type of interface for the balancer being created
- choices:
- - PUBLIC
- - SERVICENET
- default: PUBLIC
- vip_id:
- type: str
- description:
- - Virtual IP ID to use when creating the load balancer for purposes of
- sharing an IP with another load balancer of another protocol
- wait:
- description:
- - wait for the balancer to be in state 'running' before returning
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a Load Balancer
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Load Balancer create request
- local_action:
- module: rax_clb
- credentials: ~/.raxpub
- name: my-lb
- port: 8080
- protocol: HTTP
- type: SERVICENET
- timeout: 30
- region: DFW
- wait: true
- state: present
- meta:
- app: my-cool-app
- register: my_lb
-'''
-
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
- CLB_PROTOCOLS,
- rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
- vip_type, timeout, wait, wait_timeout, vip_id):
- if int(timeout) < 30:
- module.fail_json(msg='"timeout" must be greater than or equal to 30')
-
- changed = False
- balancers = []
-
- clb = pyrax.cloud_loadbalancers
- if not clb:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- balancer_list = clb.list()
- while balancer_list:
- retrieved = clb.list(marker=balancer_list.pop().id)
- balancer_list.extend(retrieved)
- if len(retrieved) < 2:
- break
-
- for balancer in balancer_list:
- if name != balancer.name and name != balancer.id:
- continue
-
- balancers.append(balancer)
-
- if len(balancers) > 1:
- module.fail_json(msg='Multiple Load Balancers were matched by name, '
- 'try using the Load Balancer ID instead')
-
- if state == 'present':
- if isinstance(meta, dict):
- metadata = [dict(key=k, value=v) for k, v in meta.items()]
-
- if not balancers:
- try:
- virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
- balancer = clb.create(name, metadata=metadata, port=port,
- algorithm=algorithm, protocol=protocol,
- timeout=timeout, virtual_ips=virtual_ips)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- balancer = balancers[0]
- setattr(balancer, 'metadata',
- [dict(key=k, value=v) for k, v in
- balancer.get_metadata().items()])
- atts = {
- 'name': name,
- 'algorithm': algorithm,
- 'port': port,
- 'protocol': protocol,
- 'timeout': timeout
- }
- for att, value in atts.items():
- current = getattr(balancer, att)
- if current != value:
- changed = True
-
- if changed:
- balancer.update(**atts)
-
- if balancer.metadata != metadata:
- balancer.set_metadata(meta)
- changed = True
-
- virtual_ips = [clb.VirtualIP(type=vip_type)]
- current_vip_types = set([v.type for v in balancer.virtual_ips])
- vip_types = set([v.type for v in virtual_ips])
- if current_vip_types != vip_types:
- module.fail_json(msg='Load balancer Virtual IP type cannot '
- 'be changed')
-
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- balancer.get()
- instance = rax_to_dict(balancer, 'clb')
-
- result = dict(changed=changed, balancer=instance)
-
- if balancer.status == 'ERROR':
- result['msg'] = '%s failed to build' % balancer.id
- elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
- result['msg'] = 'Timeout waiting on %s' % balancer.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- if balancers:
- balancer = balancers[0]
- try:
- balancer.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- instance = rax_to_dict(balancer, 'clb')
-
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_until(balancer, 'status', ('DELETED'),
- interval=5, attempts=attempts)
- else:
- instance = {}
-
- module.exit_json(changed=changed, balancer=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- algorithm=dict(choices=CLB_ALGORITHMS,
- default='LEAST_CONNECTIONS'),
- meta=dict(type='dict', default={}),
- name=dict(required=True),
- port=dict(type='int', default=80),
- protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
- state=dict(default='present', choices=['present', 'absent']),
- timeout=dict(type='int', default=30),
- type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
- vip_id=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- algorithm = module.params.get('algorithm')
- meta = module.params.get('meta')
- name = module.params.get('name')
- port = module.params.get('port')
- protocol = module.params.get('protocol')
- state = module.params.get('state')
- timeout = int(module.params.get('timeout'))
- vip_id = module.params.get('vip_id')
- vip_type = module.params.get('type')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- setup_rax_module(module, pyrax)
-
- cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
- vip_type, timeout, wait, wait_timeout, vip_id)
-
-
-if __name__ == '__main__':
- main()
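cloud_load_balancer() above walks every existing balancer with a marker-based pagination loop before matching on name or ID. A stand-alone sketch of that loop follows, using a FakeCLB class with plain string IDs in place of pyrax.cloud_loadbalancers; as in the module, a returned page shorter than two items (only the marker itself) ends the walk.

```python
# Sketch of the marker-based pagination in the removed rax_clb module. FakeCLB
# stands in for pyrax.cloud_loadbalancers and uses plain strings as IDs; as in
# the module, list(marker=X) is assumed to return X itself plus the items after
# it, so a page shorter than two items means there is nothing new.

class FakeCLB(object):
    def __init__(self, ids, page_size=3):
        self._ids = ids
        self._page = page_size

    def list(self, marker=None):
        start = 0 if marker is None else self._ids.index(marker)
        return list(self._ids[start:start + self._page])


def list_all(clb):
    items = clb.list()
    while items:
        retrieved = clb.list(marker=items.pop())   # re-fetch from the last item seen
        items.extend(retrieved)
        if len(retrieved) < 2:                     # only the marker itself came back
            break
    return items


if __name__ == '__main__':
    print(list_all(FakeCLB(['lb-%d' % i for i in range(7)])))   # all seven, no duplicates
```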
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
deleted file mode 100644
index c076dced7..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_clb_nodes
-short_description: Add, modify and remove nodes from a Rackspace Cloud Load Balancer
-description:
- - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- address:
- type: str
- required: false
- description:
- - IP address or domain name of the node
- condition:
- type: str
- required: false
- choices:
- - enabled
- - disabled
- - draining
- description:
- - Condition for the node, which determines its role within the load
- balancer
- load_balancer_id:
- type: int
- required: true
- description:
- - Load balancer id
- node_id:
- type: int
- required: false
- description:
- - Node id
- port:
- type: int
- required: false
- description:
- - Port number of the load balanced service on the node
- state:
- type: str
- required: false
- default: "present"
- choices:
- - present
- - absent
- description:
- - Indicate desired state of the node
- type:
- type: str
- required: false
- choices:
- - primary
- - secondary
- description:
- - Type of node
- wait:
- required: false
- default: false
- type: bool
- description:
- - Wait for the load balancer to become active before returning
- wait_timeout:
- type: int
- required: false
- default: 30
- description:
- - How long to wait before giving up and returning an error
- weight:
- type: int
- required: false
- description:
- - Weight of node
- virtualenv:
- type: path
- description:
- - Virtualenv to execute this module in
-author: "Lukasz Kawczynski (@neuroid)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Add a new node to the load balancer
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- address: 10.2.2.3
- port: 80
- condition: enabled
- type: primary
- wait: true
- credentials: /path/to/credentials
-
-- name: Drain connections from a node
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- node_id: 410
- condition: draining
- wait: true
- credentials: /path/to/credentials
-
-- name: Remove a node from the load balancer
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- node_id: 410
- state: absent
- wait: true
- credentials: /path/to/credentials
-'''
-
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
-
-
-def _activate_virtualenv(path):
- activate_this = os.path.join(path, 'bin', 'activate_this.py')
- with open(activate_this) as f:
- code = compile(f.read(), activate_this, 'exec')
- exec(code)
-
-
-def _get_node(lb, node_id=None, address=None, port=None):
- """Return a matching node"""
- for node in getattr(lb, 'nodes', []):
- match_list = []
- if node_id is not None:
- match_list.append(getattr(node, 'id', None) == node_id)
- if address is not None:
- match_list.append(getattr(node, 'address', None) == address)
- if port is not None:
- match_list.append(getattr(node, 'port', None) == port)
-
- if match_list and all(match_list):
- return node
-
- return None
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- condition=dict(choices=['enabled', 'disabled', 'draining']),
- load_balancer_id=dict(required=True, type='int'),
- node_id=dict(type='int'),
- port=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- type=dict(choices=['primary', 'secondary']),
- virtualenv=dict(type='path'),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=30, type='int'),
- weight=dict(type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params['address']
- condition = (module.params['condition'] and
- module.params['condition'].upper())
- load_balancer_id = module.params['load_balancer_id']
- node_id = module.params['node_id']
- port = module.params['port']
- state = module.params['state']
- typ = module.params['type'] and module.params['type'].upper()
- virtualenv = module.params['virtualenv']
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout'] or 1
- weight = module.params['weight']
-
- if virtualenv:
- try:
- _activate_virtualenv(virtualenv)
- except IOError as e:
- module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
- virtualenv, e))
-
- setup_rax_module(module, pyrax)
-
- if not pyrax.cloud_loadbalancers:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- try:
- lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
-
- node = _get_node(lb, node_id, address, port)
-
- result = rax_clb_node_to_dict(node)
-
- if state == 'absent':
- if not node: # Removing a non-existent node
- module.exit_json(changed=False, state=state)
- try:
- lb.delete_node(node)
- result = {}
- except pyrax.exc.NotFound:
- module.exit_json(changed=False, state=state)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- else: # present
- if not node:
- if node_id: # Updating a non-existent node
- msg = 'Node %d not found' % node_id
- if lb.nodes:
- msg += (' (available nodes: %s)' %
- ', '.join([str(x.id) for x in lb.nodes]))
- module.fail_json(msg=msg)
- else: # Creating a new node
- try:
- node = pyrax.cloudloadbalancers.Node(
- address=address, port=port, condition=condition,
- weight=weight, type=typ)
- resp, body = lb.add_nodes([node])
- result.update(body['nodes'][0])
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- else: # Updating an existing node
- mutable = {
- 'condition': condition,
- 'type': typ,
- 'weight': weight,
- }
-
- for name in list(mutable):
- value = mutable[name]
- if value is None or value == getattr(node, name):
- mutable.pop(name)
-
- if not mutable:
- module.exit_json(changed=False, state=state, node=result)
-
- try:
- # The diff has to be set explicitly to update node's weight and
- # type; this should probably be fixed in pyrax
- lb.update_node(node, diff=mutable)
- result.update(mutable)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
-
- if wait:
- pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
- attempts=wait_timeout)
- if lb.status != 'ACTIVE':
- module.fail_json(
- msg='Load balancer not active after %ds (current status: %s)' %
- (wait_timeout, lb.status.lower()))
-
- kwargs = {'node': result} if result else {}
- module.exit_json(changed=True, state=state, **kwargs)
-
-
-if __name__ == '__main__':
- main()
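Before updating an existing node, the module prunes its "mutable" attribute dict so that only requested, actually-different values are sent to the API. A small sketch of that pruning with a stand-in FakeNode object:

```python
# Sketch of the attribute pruning the removed rax_clb_nodes module applies
# before updating a node: drop anything that was not requested (None) or that
# already matches, and only call the API when something is left. FakeNode is a
# stand-in for the pyrax node object; 'typ' mirrors the module's own name for
# the parameter to avoid shadowing the builtin.

class FakeNode(object):
    def __init__(self, condition, node_type, weight):
        self.condition = condition
        self.type = node_type
        self.weight = weight


def pending_changes(node, condition=None, typ=None, weight=None):
    mutable = {'condition': condition, 'type': typ, 'weight': weight}
    for name in list(mutable):
        value = mutable[name]
        if value is None or value == getattr(node, name):
            mutable.pop(name)          # not requested or already correct
    return mutable                     # empty dict means no update is needed


if __name__ == '__main__':
    node = FakeNode(condition='ENABLED', node_type='PRIMARY', weight=1)
    print(pending_changes(node, condition='DRAINING'))            # {'condition': 'DRAINING'}
    print(pending_changes(node, condition='ENABLED', weight=1))   # {}
```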
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
deleted file mode 100644
index b794130cf..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: rax_clb_ssl
-short_description: Manage SSL termination for a Rackspace Cloud Load Balancer
-description:
-- Set up, reconfigure, or remove SSL termination for an existing load balancer.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- loadbalancer:
- type: str
- description:
- - Name or ID of the load balancer on which to manage SSL termination.
- required: true
- state:
- type: str
- description:
- - If set to "present", SSL termination will be added to this load balancer.
- - If "absent", SSL termination will be removed instead.
- choices:
- - present
- - absent
- default: present
- enabled:
- description:
- - If set to "false", temporarily disable SSL termination without discarding
- - existing credentials.
- default: true
- type: bool
- private_key:
- type: str
- description:
- - The private SSL key as a string in PEM format.
- certificate:
- type: str
- description:
- - The public SSL certificates as a string in PEM format.
- intermediate_certificate:
- type: str
- description:
- - One or more intermediate certificate authorities as a string in PEM format, concatenated into a single string.
- secure_port:
- type: int
- description:
- - The port to listen for secure traffic.
- default: 443
- secure_traffic_only:
- description:
- - If "true", the load balancer will *only* accept secure traffic.
- default: false
- type: bool
- https_redirect:
- description:
- - If "true", the load balancer will redirect HTTP traffic to HTTPS.
- - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
- - termination is also applied or removed.
- type: bool
- wait:
- description:
- - Wait for the balancer to be in state "running" before returning.
- default: false
- type: bool
- wait_timeout:
- type: int
- description:
- - How long before "wait" gives up, in seconds.
- default: 300
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Enable SSL termination on a load balancer
- community.general.rax_clb_ssl:
- loadbalancer: the_loadbalancer
- state: present
- private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
- certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
- intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
- secure_traffic_only: true
- wait: true
-
-- name: Disable SSL termination
- community.general.rax_clb_ssl:
- loadbalancer: "{{ registered_lb.balancer.id }}"
- state: absent
- wait: true
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_find_loadbalancer,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
- certificate, intermediate_certificate, secure_port,
- secure_traffic_only, https_redirect,
- wait, wait_timeout):
- # Validate arguments.
-
- if state == 'present':
- if not private_key:
- module.fail_json(msg="private_key must be provided.")
- else:
- private_key = private_key.strip()
-
- if not certificate:
- module.fail_json(msg="certificate must be provided.")
- else:
- certificate = certificate.strip()
-
- attempts = wait_timeout // 5
-
- # Locate the load balancer.
-
- balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
- existing_ssl = balancer.get_ssl_termination()
-
- changed = False
-
- if state == 'present':
- # Apply or reconfigure SSL termination on the load balancer.
- ssl_attrs = dict(
- securePort=secure_port,
- privatekey=private_key,
- certificate=certificate,
- intermediateCertificate=intermediate_certificate,
- enabled=enabled,
- secureTrafficOnly=secure_traffic_only
- )
-
- needs_change = False
-
- if existing_ssl:
- for ssl_attr, value in ssl_attrs.items():
- if ssl_attr == 'privatekey':
- # The private key is not included in get_ssl_termination's
- # output (as it shouldn't be). Also, if you're changing the
- # private key, you'll also be changing the certificate,
- # so we don't lose anything by not checking it.
- continue
-
- if value is not None and existing_ssl.get(ssl_attr) != value:
- # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
- needs_change = True
- else:
- needs_change = True
-
- if needs_change:
- try:
- balancer.add_ssl_termination(**ssl_attrs)
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
- elif state == 'absent':
- # Remove SSL termination if it's already configured.
- if existing_ssl:
- try:
- balancer.delete_ssl_termination()
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
-
- if https_redirect is not None and balancer.httpsRedirect != https_redirect:
- if changed:
- # This wait is unavoidable because load balancers are immutable
- # while the SSL termination changes above are being applied.
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- try:
- balancer.update(httpsRedirect=https_redirect)
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
-
- if changed and wait:
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- balancer.get()
- new_ssl_termination = balancer.get_ssl_termination()
-
- # Intentionally omit the private key from the module output, so you don't
- # accidentally echo it with `ansible-playbook -v` or `debug`, and the
- # certificate, which is just long. Convert other attributes to snake_case
- # and include https_redirect at the top-level.
- if new_ssl_termination:
- new_ssl = dict(
- enabled=new_ssl_termination['enabled'],
- secure_port=new_ssl_termination['securePort'],
- secure_traffic_only=new_ssl_termination['secureTrafficOnly']
- )
- else:
- new_ssl = None
-
- result = dict(
- changed=changed,
- https_redirect=balancer.httpsRedirect,
- ssl_termination=new_ssl,
- balancer=rax_to_dict(balancer, 'clb')
- )
- success = True
-
- if balancer.status == 'ERROR':
- result['msg'] = '%s failed to build' % balancer.id
- success = False
- elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
- result['msg'] = 'Timeout waiting on %s' % balancer.id
- success = False
-
- if success:
- module.exit_json(**result)
- else:
- module.fail_json(**result)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(dict(
- loadbalancer=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- enabled=dict(type='bool', default=True),
- private_key=dict(no_log=True),
- certificate=dict(),
- intermediate_certificate=dict(),
- secure_port=dict(type='int', default=443),
- secure_traffic_only=dict(type='bool', default=False),
- https_redirect=dict(type='bool'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module.')
-
- loadbalancer = module.params.get('loadbalancer')
- state = module.params.get('state')
- enabled = module.boolean(module.params.get('enabled'))
- private_key = module.params.get('private_key')
- certificate = module.params.get('certificate')
- intermediate_certificate = module.params.get('intermediate_certificate')
- secure_port = module.params.get('secure_port')
- secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
- https_redirect = module.boolean(module.params.get('https_redirect'))
- wait = module.boolean(module.params.get('wait'))
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_load_balancer_ssl(
- module, loadbalancer, state, enabled, private_key, certificate,
- intermediate_certificate, secure_port, secure_traffic_only,
- https_redirect, wait, wait_timeout
- )
-
-
-if __name__ == '__main__':
- main()
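The needs_change logic above compares the desired SSL attributes against what get_ssl_termination() reports, skipping the private key because the API never echoes it back. A minimal sketch of that drift check with plain dictionaries standing in for the API payloads:

```python
# Sketch of the drift check in the removed rax_clb_ssl module: compare every
# desired SSL attribute with what the balancer already reports, skipping the
# private key because get_ssl_termination() never returns it. Plain dicts stand
# in for the API payloads; the key names mirror the module's ssl_attrs.

def needs_change(existing_ssl, desired):
    if not existing_ssl:
        return True                        # SSL termination not configured yet
    for attr, value in desired.items():
        if attr == 'privatekey':
            continue                       # never echoed back by the API
        if value is not None and existing_ssl.get(attr) != value:
            return True
    return False


if __name__ == '__main__':
    existing = {'securePort': 443, 'enabled': True, 'secureTrafficOnly': False}
    desired = {'securePort': 443, 'privatekey': '-----BEGIN...', 'certificate': None,
               'enabled': True, 'secureTrafficOnly': True}
    print(needs_change(existing, desired))   # True: secureTrafficOnly differs
```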
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns.py b/ansible_collections/community/general/plugins/modules/rax_dns.py
deleted file mode 100644
index 31782cd88..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_dns.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_dns
-short_description: Manage domains on Rackspace Cloud DNS
-description:
- - Manage domains on Rackspace Cloud DNS
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- comment:
- type: str
- description:
- - Brief description of the domain. Maximum length of 160 characters
- email:
- type: str
- description:
- - Email address of the domain administrator
- name:
- type: str
- description:
- - Domain name to create
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- ttl:
- type: int
- description:
- - Time to live of domain in seconds
- default: 3600
-notes:
- - "It is recommended that plays utilizing this module be run with
- C(serial: 1) to avoid exceeding the API request limit imposed by
- the Rackspace CloudDNS API"
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Create domain
- hosts: all
- gather_facts: false
- tasks:
- - name: Domain create request
- local_action:
- module: rax_dns
- credentials: ~/.raxpub
- name: example.org
- email: admin@example.org
- register: rax_dns
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_dns(module, comment, email, name, state, ttl):
- changed = False
-
- dns = pyrax.cloud_dns
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not email:
- module.fail_json(msg='An "email" attribute is required for '
- 'creating a domain')
-
- try:
- domain = dns.find(name=name)
- except pyrax.exceptions.NoUniqueMatch as e:
- module.fail_json(msg='%s' % e.message)
- except pyrax.exceptions.NotFound:
- try:
- domain = dns.create(name=name, emailAddress=email, ttl=ttl,
- comment=comment)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- update = {}
- if comment != getattr(domain, 'comment', None):
- update['comment'] = comment
- if ttl != getattr(domain, 'ttl', None):
- update['ttl'] = ttl
- if email != getattr(domain, 'emailAddress', None):
- update['emailAddress'] = email
-
- if update:
- try:
- domain.update(**update)
- changed = True
- domain.get()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- domain = dns.find(name=name)
- except pyrax.exceptions.NotFound:
- domain = {}
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- if domain:
- try:
- domain.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, domain=rax_to_dict(domain))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- comment=dict(),
- email=dict(),
- name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- ttl=dict(type='int', default=3600),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- comment = module.params.get('comment')
- email = module.params.get('email')
- name = module.params.get('name')
- state = module.params.get('state')
- ttl = module.params.get('ttl')
-
- setup_rax_module(module, pyrax, False)
-
- rax_dns(module, comment, email, name, state, ttl)
-
-
-if __name__ == '__main__':
- main()
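For an existing domain, rax_dns() builds an update payload containing only the attributes that differ from what the API reports. A self-contained sketch of that comparison, with FakeDomain standing in for the pyrax domain object:

```python
# Sketch of the attribute comparison in the removed rax_dns module: only fields
# that differ from what the API reports end up in the update payload. FakeDomain
# stands in for the pyrax domain object (emailAddress matches the API field name).

class FakeDomain(object):
    def __init__(self, comment=None, ttl=3600, emailAddress=None):
        self.comment = comment
        self.ttl = ttl
        self.emailAddress = emailAddress


def domain_update(domain, comment, email, ttl):
    update = {}
    if comment != getattr(domain, 'comment', None):
        update['comment'] = comment
    if ttl != getattr(domain, 'ttl', None):
        update['ttl'] = ttl
    if email != getattr(domain, 'emailAddress', None):
        update['emailAddress'] = email
    return update                          # empty dict: domain already matches


if __name__ == '__main__':
    dom = FakeDomain(comment='prod zone', ttl=3600, emailAddress='admin@example.org')
    print(domain_update(dom, 'prod zone', 'admin@example.org', 300))    # {'ttl': 300}
    print(domain_update(dom, 'prod zone', 'admin@example.org', 3600))   # {}
```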
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/rax_dns_record.py
deleted file mode 100644
index cb3cd279e..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_dns_record.py
+++ /dev/null
@@ -1,358 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_dns_record
-short_description: Manage DNS records on Rackspace Cloud DNS
-description:
- - Manage DNS records on Rackspace Cloud DNS
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- comment:
- type: str
- description:
- - Brief description of the record. Maximum length of 160 characters
- data:
- type: str
- description:
- - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
- SRV/TXT
- required: true
- domain:
- type: str
- description:
- - Domain name to create the record in. This is an invalid option when
- type=PTR
- loadbalancer:
- type: str
- description:
- - Load Balancer ID to create a PTR record for. Only used with type=PTR
- name:
- type: str
- description:
- - FQDN record name to create
- required: true
- overwrite:
- description:
- - If set to "true" (the default), update the existing record with the matching name; this
- module will fail if there are already multiple records with that name.
- - If set to "false", add a new record whenever the data does not match an existing record
- with the same name.
- default: true
- type: bool
- priority:
- type: int
- description:
- - Required for MX and SRV records, but forbidden for other record types.
- If specified, must be an integer from 0 to 65535.
- server:
- type: str
- description:
- - Server ID to create a PTR record for. Only used with type=PTR
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- ttl:
- type: int
- description:
- - Time to live of record in seconds
- default: 3600
- type:
- type: str
- description:
- - DNS record type
- choices:
- - A
- - AAAA
- - CNAME
- - MX
- - NS
- - SRV
- - TXT
- - PTR
- required: true
-notes:
- - "It is recommended that plays utilizing this module be run with
- C(serial: 1) to avoid exceeding the API request limit imposed by
- the Rackspace CloudDNS API."
- - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
- supplied.
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Create DNS Records
- hosts: all
- gather_facts: false
- tasks:
- - name: Create A record
- local_action:
- module: rax_dns_record
- credentials: ~/.raxpub
- domain: example.org
- name: www.example.org
- data: "{{ rax_accessipv4 }}"
- type: A
- register: a_record
-
- - name: Create PTR record
- local_action:
- module: rax_dns_record
- credentials: ~/.raxpub
- server: "{{ rax_id }}"
- name: "{{ inventory_hostname }}"
- region: DFW
- register: ptr_record
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_find_loadbalancer,
- rax_find_server,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
- name=None, server=None, state='present', ttl=7200):
- changed = False
- results = []
-
- dns = pyrax.cloud_dns
-
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if loadbalancer:
- item = rax_find_loadbalancer(module, pyrax, loadbalancer)
- elif server:
- item = rax_find_server(module, pyrax, server)
-
- if state == 'present':
- current = dns.list_ptr_records(item)
- for record in current:
- if record.data == data:
- if record.ttl != ttl or record.name != name:
- try:
- dns.update_ptr_record(item, record, name, data, ttl)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- record.ttl = ttl
- record.name = name
- results.append(rax_to_dict(record))
- break
- else:
- results.append(rax_to_dict(record))
- break
-
- if not results:
- record = dict(name=name, type='PTR', data=data, ttl=ttl,
- comment=comment)
- try:
- results = dns.add_ptr_records(item, [record])
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, records=results)
-
- elif state == 'absent':
- current = dns.list_ptr_records(item)
- for record in current:
- if record.data == data:
- results.append(rax_to_dict(record))
- break
-
- if results:
- try:
- dns.delete_ptr_records(item, data)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, records=results)
-
-
-def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
- overwrite=True, priority=None, record_type='A',
- state='present', ttl=7200):
- """Function for manipulating record types other than PTR"""
-
- changed = False
-
- dns = pyrax.cloud_dns
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not priority and record_type in ['MX', 'SRV']:
- module.fail_json(msg='A "priority" attribute is required for '
- 'creating a MX or SRV record')
-
- try:
- domain = dns.find(name=domain)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- try:
- if overwrite:
- record = domain.find_record(record_type, name=name)
- else:
- record = domain.find_record(record_type, name=name, data=data)
- except pyrax.exceptions.DomainRecordNotUnique as e:
- module.fail_json(msg='overwrite=true and there are multiple matching records')
- except pyrax.exceptions.DomainRecordNotFound as e:
- try:
- record_data = {
- 'type': record_type,
- 'name': name,
- 'data': data,
- 'ttl': ttl
- }
- if comment:
- record_data.update(dict(comment=comment))
- if priority and record_type.upper() in ['MX', 'SRV']:
- record_data.update(dict(priority=priority))
-
- record = domain.add_records([record_data])[0]
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- update = {}
- if comment != getattr(record, 'comment', None):
- update['comment'] = comment
- if ttl != getattr(record, 'ttl', None):
- update['ttl'] = ttl
- if priority != getattr(record, 'priority', None):
- update['priority'] = priority
- if data != getattr(record, 'data', None):
- update['data'] = data
-
- if update:
- try:
- record.update(**update)
- changed = True
- record.get()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- domain = dns.find(name=domain)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- try:
- record = domain.find_record(record_type, name=name, data=data)
- except pyrax.exceptions.DomainRecordNotFound as e:
- record = {}
- except pyrax.exceptions.DomainRecordNotUnique as e:
- module.fail_json(msg='%s' % e.message)
-
- if record:
- try:
- record.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, record=rax_to_dict(record))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- comment=dict(),
- data=dict(required=True),
- domain=dict(),
- loadbalancer=dict(),
- name=dict(required=True),
- overwrite=dict(type='bool', default=True),
- priority=dict(type='int'),
- server=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- ttl=dict(type='int', default=3600),
- type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
- 'SRV', 'TXT', 'PTR'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[
- ['server', 'loadbalancer', 'domain'],
- ],
- required_one_of=[
- ['server', 'loadbalancer', 'domain'],
- ],
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- comment = module.params.get('comment')
- data = module.params.get('data')
- domain = module.params.get('domain')
- loadbalancer = module.params.get('loadbalancer')
- name = module.params.get('name')
- overwrite = module.params.get('overwrite')
- priority = module.params.get('priority')
- server = module.params.get('server')
- state = module.params.get('state')
- ttl = module.params.get('ttl')
- record_type = module.params.get('type')
-
- setup_rax_module(module, pyrax, False)
-
- if record_type.upper() == 'PTR':
- if not server and not loadbalancer:
- module.fail_json(msg='one of the following is required: '
- 'server,loadbalancer')
- rax_dns_record_ptr(module, data=data, comment=comment,
- loadbalancer=loadbalancer, name=name, server=server,
- state=state, ttl=ttl)
- else:
- rax_dns_record(module, comment=comment, data=data, domain=domain,
- name=name, overwrite=overwrite, priority=priority,
- record_type=record_type, state=state, ttl=ttl)
-
-
-if __name__ == '__main__':
- main()
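The overwrite option changes how an existing record is matched: with overwrite=true the lookup is by name only and the record is updated in place, while with overwrite=false the lookup also matches on data, so differing data adds a new record. A dependency-free sketch of those semantics over a plain list of record dicts; find_record()/ensure_record() are illustrative stand-ins for domain.find_record() and domain.add_records().

```python
# Sketch of the overwrite semantics in the removed rax_dns_record module, over a
# plain list of record dicts. find_record()/ensure_record() are illustrative
# stand-ins for domain.find_record() and domain.add_records().

def find_record(records, record_type, name, data=None):
    matches = [r for r in records
               if r['type'] == record_type and r['name'] == name
               and (data is None or r['data'] == data)]
    if len(matches) > 1:
        raise ValueError('multiple matching records')   # DomainRecordNotUnique in pyrax
    return matches[0] if matches else None


def ensure_record(records, record_type, name, data, overwrite=True):
    # overwrite=True: match by name only and update in place.
    # overwrite=False: match by name and data, so different data adds a record.
    record = find_record(records, record_type, name,
                         data=None if overwrite else data)
    if record is None:
        record = {'type': record_type, 'name': name, 'data': data}
        records.append(record)
        return record, True
    changed = record['data'] != data
    record['data'] = data
    return record, changed


if __name__ == '__main__':
    zone = [{'type': 'A', 'name': 'www.example.org', 'data': '10.0.0.1'}]
    print(ensure_record(zone, 'A', 'www.example.org', '10.0.0.2'))                   # updated in place
    print(ensure_record(zone, 'A', 'www.example.org', '10.0.0.3', overwrite=False))  # new record added
    print(len(zone))                                                                 # 2
```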
diff --git a/ansible_collections/community/general/plugins/modules/rax_facts.py b/ansible_collections/community/general/plugins/modules/rax_facts.py
deleted file mode 100644
index f8bb0e050..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_facts.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_facts
-short_description: Gather facts for Rackspace Cloud Servers
-description:
- - Gather facts for Rackspace Cloud Servers.
-attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
-options:
- address:
- type: str
- description:
- - Server IP address to retrieve facts for; it will match any IP assigned to
- the server
- id:
- type: str
- description:
- - Server ID to retrieve facts for
- name:
- type: str
- description:
- - Server name to retrieve facts for
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
- - community.general.attributes.facts
- - community.general.attributes.facts_module
-
-'''
-
-EXAMPLES = '''
-- name: Gather info about servers
- hosts: all
- gather_facts: false
- tasks:
- - name: Get facts about servers
- local_action:
- module: rax_facts
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
- - name: Map some facts
- ansible.builtin.set_fact:
- ansible_ssh_host: "{{ rax_accessipv4 }}"
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_facts(module, address, name, server_id):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- ansible_facts = {}
-
- search_opts = {}
- if name:
- search_opts = dict(name='^%s$' % name)
- try:
- servers = cs.servers.list(search_opts=search_opts)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif address:
- servers = []
- try:
- for server in cs.servers.list():
- for addresses in server.networks.values():
- if address in addresses:
- servers.append(server)
- break
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif server_id:
- servers = []
- try:
- servers.append(cs.servers.get(server_id))
- except Exception as e:
- pass
-
- servers[:] = [server for server in servers if server.status != "DELETED"]
-
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers found matching provided '
- 'search parameters')
- elif len(servers) == 1:
- ansible_facts = rax_to_dict(servers[0], 'server')
-
- module.exit_json(changed=changed, ansible_facts=ansible_facts)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- id=dict(),
- name=dict(),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[['address', 'id', 'name']],
- required_one_of=[['address', 'id', 'name']],
- supports_check_mode=True,
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params.get('address')
- server_id = module.params.get('id')
- name = module.params.get('name')
-
- setup_rax_module(module, pyrax)
-
- rax_facts(module, address, name, server_id)
-
-
-if __name__ == '__main__':
- main()
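When an address is given, rax_facts() matches a server if the IP appears in any of its networks and then filters out servers in DELETED state. A small sketch of that lookup with a stand-in FakeServer object:

```python
# Sketch of the address lookup in the removed rax_facts module: a server matches
# when the requested IP appears in any of its networks, and servers in DELETED
# state are dropped before facts are returned. FakeServer mimics the fields the
# module reads from the pyrax server object.

class FakeServer(object):
    def __init__(self, name, networks, status='ACTIVE'):
        self.name = name
        self.networks = networks           # dict: network name -> list of IPs
        self.status = status


def find_by_address(servers, address):
    matched = []
    for server in servers:
        for addresses in server.networks.values():
            if address in addresses:
                matched.append(server)
                break
    return [s for s in matched if s.status != 'DELETED']


if __name__ == '__main__':
    servers = [
        FakeServer('web1', {'public': ['203.0.113.10'], 'private': ['10.0.0.5']}),
        FakeServer('web2', {'public': ['203.0.113.11']}, status='DELETED'),
    ]
    print([s.name for s in find_by_address(servers, '203.0.113.10')])   # ['web1']
```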
diff --git a/ansible_collections/community/general/plugins/modules/rax_files.py b/ansible_collections/community/general/plugins/modules/rax_files.py
deleted file mode 100644
index a63e107eb..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_files.py
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_files
-short_description: Manipulate Rackspace Cloud Files Containers
-description:
- - Manipulate Rackspace Cloud Files Containers
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- clear_meta:
- description:
- - Optionally clear existing metadata when applying metadata to existing containers.
- Selecting this option is only appropriate when setting type=meta
- type: bool
- default: false
- container:
- type: str
- description:
- - The container to use for container or metadata operations.
- meta:
- type: dict
- default: {}
- description:
- - A hash of items to set as metadata values on a container
- private:
- description:
- - Used to set a container as private, removing it from the CDN. B(Warning!)
- Private containers, if previously made public, can have live objects
- available until the TTL on cached objects expires
- type: bool
- default: false
- public:
- description:
- - Used to set a container as public, available via the Cloud Files CDN
- type: bool
- default: false
- region:
- type: str
- description:
- - Region to create an instance in
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent', 'list']
- default: present
- ttl:
- type: int
- description:
- - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
- Setting a TTL is only appropriate for containers that are public
- type:
- type: str
- description:
- - Type of object to do work on, i.e. metadata object or a container object
- choices:
- - container
- - meta
- default: container
- web_error:
- type: str
- description:
- - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
- web_index:
- type: str
- description:
- - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
-author: "Paul Durivage (@angstwad)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: "Test Cloud Files Containers"
- hosts: local
- gather_facts: false
- tasks:
- - name: "List all containers"
- community.general.rax_files:
- state: list
-
- - name: "Create container called 'mycontainer'"
- community.general.rax_files:
- container: mycontainer
-
- - name: "Create container 'mycontainer2' with metadata"
- community.general.rax_files:
- container: mycontainer2
- meta:
- key: value
- file_for: someuser@example.com
-
- - name: "Set a container's web index page"
- community.general.rax_files:
- container: mycontainer
- web_index: index.html
-
- - name: "Set a container's web error page"
- community.general.rax_files:
- container: mycontainer
- web_error: error.html
-
- - name: "Make container public"
- community.general.rax_files:
- container: mycontainer
- public: true
-
- - name: "Make container public with a 24 hour TTL"
- community.general.rax_files:
- container: mycontainer
- public: true
- ttl: 86400
-
- - name: "Make container private"
- community.general.rax_files:
- container: mycontainer
- private: true
-
-- name: "Test Cloud Files Containers Metadata Storage"
- hosts: local
- gather_facts: false
- tasks:
- - name: "Get mycontainer2 metadata"
- community.general.rax_files:
- container: mycontainer2
- type: meta
-
- - name: "Set mycontainer2 metadata"
- community.general.rax_files:
- container: mycontainer2
- type: meta
- meta:
- uploaded_by: someuser@example.com
-
- - name: "Remove mycontainer2 metadata"
- community.general.rax_files:
- container: "mycontainer2"
- type: meta
- state: absent
- meta:
- key: ""
- file_for: ""
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError as e:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-EXIT_DICT = dict(success=True)
-META_PREFIX = 'x-container-meta-'
-
-
-def _get_container(module, cf, container):
- try:
- return cf.get_container(container)
- except pyrax.exc.NoSuchContainer as e:
- module.fail_json(msg=e.message)
-
-
-def _fetch_meta(module, container):
- EXIT_DICT['meta'] = dict()
- try:
- for k, v in container.get_metadata().items():
- split_key = k.split(META_PREFIX)[-1]
- EXIT_DICT['meta'][split_key] = v
- except Exception as e:
- module.fail_json(msg=e.message)
-
-
-def meta(cf, module, container_, state, meta_, clear_meta):
- c = _get_container(module, cf, container_)
-
- if meta_ and state == 'present':
- try:
- meta_set = c.set_metadata(meta_, clear=clear_meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- elif meta_ and state == 'absent':
- remove_results = []
- for k, v in meta_.items():
- c.remove_metadata_key(k)
- remove_results.append(k)
- EXIT_DICT['deleted_meta_keys'] = remove_results
- elif state == 'absent':
- remove_results = []
- for k, v in c.get_metadata().items():
- c.remove_metadata_key(k)
- remove_results.append(k)
- EXIT_DICT['deleted_meta_keys'] = remove_results
-
- _fetch_meta(module, c)
- _locals = locals().keys()
-
- EXIT_DICT['container'] = c.name
- if 'meta_set' in _locals or 'remove_results' in _locals:
- EXIT_DICT['changed'] = True
-
- module.exit_json(**EXIT_DICT)
-
-
-def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
- private, web_index, web_error):
- if public and private:
- module.fail_json(msg='container cannot be simultaneously '
- 'set to public and private')
-
- if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
- module.fail_json(msg='attributes cannot be set or removed '
- 'when state is absent')
-
- if state == 'list':
- # We don't care if attributes are specified, let's list containers
- EXIT_DICT['containers'] = cf.list_containers()
- module.exit_json(**EXIT_DICT)
-
- try:
- c = cf.get_container(container_)
- except pyrax.exc.NoSuchContainer as e:
- # Make the container if state=present, otherwise bomb out
- if state == 'present':
- try:
- c = cf.create_container(container_)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['changed'] = True
- EXIT_DICT['created'] = True
- else:
- module.fail_json(msg=e.message)
- else:
- # Successfully grabbed a container object
- # Delete if state is absent
- if state == 'absent':
- try:
- cont_deleted = c.delete()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['deleted'] = True
-
- if meta_:
- try:
- meta_set = c.set_metadata(meta_, clear=clear_meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- finally:
- _fetch_meta(module, c)
-
- if ttl:
- try:
- c.cdn_ttl = ttl
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['ttl'] = c.cdn_ttl
-
- if public:
- try:
- cont_public = c.make_public()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
- ssl_url=c.cdn_ssl_uri,
- streaming_url=c.cdn_streaming_uri,
- ios_uri=c.cdn_ios_uri)
-
- if private:
- try:
- cont_private = c.make_private()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_private'] = True
-
- if web_index:
- try:
- cont_web_index = c.set_web_index_page(web_index)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_index'] = True
- finally:
- _fetch_meta(module, c)
-
- if web_error:
- try:
- cont_err_index = c.set_web_error_page(web_error)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_error'] = True
- finally:
- _fetch_meta(module, c)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['objs_in_container'] = c.object_count
- EXIT_DICT['total_bytes'] = c.total_bytes
-
- _locals = locals().keys()
- if ('cont_deleted' in _locals
- or 'meta_set' in _locals
- or 'cont_public' in _locals
- or 'cont_private' in _locals
- or 'cont_web_index' in _locals
- or 'cont_err_index' in _locals):
- EXIT_DICT['changed'] = True
-
- module.exit_json(**EXIT_DICT)
-
-
-def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
- private, web_index, web_error):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
-
- if cf is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if typ == "container":
- container(cf, module, container_, state, meta_, clear_meta, ttl,
- public, private, web_index, web_error)
- else:
- meta(cf, module, container_, state, meta_, clear_meta)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- container=dict(),
- state=dict(choices=['present', 'absent', 'list'],
- default='present'),
- meta=dict(type='dict', default=dict()),
- clear_meta=dict(default=False, type='bool'),
- type=dict(choices=['container', 'meta'], default='container'),
- ttl=dict(type='int'),
- public=dict(default=False, type='bool'),
- private=dict(default=False, type='bool'),
- web_index=dict(),
- web_error=dict()
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- container_ = module.params.get('container')
- state = module.params.get('state')
- meta_ = module.params.get('meta')
- clear_meta = module.params.get('clear_meta')
- typ = module.params.get('type')
- ttl = module.params.get('ttl')
- public = module.params.get('public')
- private = module.params.get('private')
- web_index = module.params.get('web_index')
- web_error = module.params.get('web_error')
-
- if state in ['present', 'absent'] and not container_:
- module.fail_json(msg='please specify a container name')
- if clear_meta and typ != 'meta':
- module.fail_json(msg='clear_meta can only be used when setting '
- 'metadata')
-
- setup_rax_module(module, pyrax)
- cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
- private, web_index, web_error)
-
-
-if __name__ == '__main__':
- main()
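
The meta() and _fetch_meta() helpers in the removed rax_files module above report container metadata with the 'x-container-meta-' key prefix stripped. A minimal, pyrax-free sketch of that prefix handling (the helper name and sample data are illustrative only, not part of the module):

# Illustrative sketch only: mirrors how _fetch_meta() strips the
# 'x-container-meta-' prefix before metadata is reported back to the caller.
META_PREFIX = 'x-container-meta-'

def strip_meta_prefix(raw_metadata):
    """Return container metadata with the Cloud Files key prefix removed."""
    return {key.split(META_PREFIX)[-1]: value for key, value in raw_metadata.items()}

if __name__ == '__main__':
    sample = {'x-container-meta-file_for': 'someuser@example.com',
              'x-container-meta-key': 'value'}
    print(strip_meta_prefix(sample))
    # {'file_for': 'someuser@example.com', 'key': 'value'}
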
diff --git a/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/rax_files_objects.py
deleted file mode 100644
index bbcdfe4f8..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_files_objects.py
+++ /dev/null
@@ -1,556 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_files_objects
-short_description: Upload, download, and delete objects in Rackspace Cloud Files
-description:
- - Upload, download, and delete objects in Rackspace Cloud Files.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- clear_meta:
- description:
- - Optionally clear existing metadata when applying metadata to existing objects.
- Selecting this option is only appropriate when setting O(type=meta).
- type: bool
- default: false
- container:
- type: str
- description:
- - The container to use for file object operations.
- required: true
- dest:
- type: str
- description:
- - The destination of a C(get) operation; for example a local directory, C(/home/user/myfolder).
- Also used to specify the remote object(s) an operation applies to; for example a file name,
- V(file1), or a comma-separated list of remote objects, V(file1,file2,file17).
- expires:
- type: int
- description:
- - Used to set an expiration in seconds on an uploaded file or folder.
- meta:
- type: dict
- default: {}
- description:
- - Items to set as metadata values on an uploaded file or folder.
- method:
- type: str
- description:
- - >
- The method of operation to be performed: V(put) to upload files, V(get) to download files or
- V(delete) to remove remote objects in Cloud Files.
- choices:
- - get
- - put
- - delete
- default: get
- src:
- type: str
- description:
- - Source from which to upload files. Also used to specify a remote object as the source of
- an operation; for example a file name, V(file1), or a comma-separated list of remote objects,
- V(file1,file2,file17). Parameters O(src) and O(dest) are mutually exclusive on remote-only object operations.
- structure:
- description:
- - Used to specify whether to maintain nested directory structure when downloading objects
- from Cloud Files. Setting this to V(false) downloads the contents of a container to a single,
- flat directory.
- type: bool
- default: true
- type:
- type: str
- description:
- - Type of object to operate on.
- - Either a metadata object or a file object.
- choices:
- - file
- - meta
- default: file
-author: "Paul Durivage (@angstwad)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: "Test Cloud Files Objects"
- hosts: local
- gather_facts: false
- tasks:
- - name: "Get objects from test container"
- community.general.rax_files_objects:
- container: testcont
- dest: ~/Downloads/testcont
-
- - name: "Get single object from test container"
- community.general.rax_files_objects:
- container: testcont
- src: file1
- dest: ~/Downloads/testcont
-
- - name: "Get several objects from test container"
- community.general.rax_files_objects:
- container: testcont
- src: file1,file2,file3
- dest: ~/Downloads/testcont
-
- - name: "Delete one object in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: file1
-
- - name: "Delete several objects in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: file2,file3,file4
-
- - name: "Delete all objects in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
-
- - name: "Upload all files to test container"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/onehundred
-
- - name: "Upload one file to test container"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/testcont/file1
-
- - name: "Upload one file to test container with metadata"
- community.general.rax_files_objects:
- container: testcont
- src: ~/Downloads/testcont/file2
- method: put
- meta:
- testkey: testdata
- who_uploaded_this: someuser@example.com
-
- - name: "Upload one file to test container with TTL of 60 seconds"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/testcont/file3
- expires: 60
-
- - name: "Attempt to get remote object that does not exist"
- community.general.rax_files_objects:
- container: testcont
- method: get
- src: FileThatDoesNotExist.jpg
- dest: ~/Downloads/testcont
- ignore_errors: true
-
- - name: "Attempt to delete remote object that does not exist"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: FileThatDoesNotExist.jpg
- ignore_errors: true
-
-- name: "Test Cloud Files Objects Metadata"
- hosts: local
- gather_facts: false
- tasks:
- - name: "Get metadata on one object"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file2
-
- - name: "Get metadata on several objects"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- src: file2,file1
-
- - name: "Set metadata on an object"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file17
- method: put
- meta:
- key1: value1
- key2: value2
- clear_meta: true
-
- - name: "Verify metadata is set"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- src: file17
-
- - name: "Delete metadata"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file17
- method: delete
- meta:
- key1: ''
- key2: ''
-
- - name: "Get metadata on all objects"
- community.general.rax_files_objects:
- container: testcont
- type: meta
-'''
-
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-EXIT_DICT = dict(success=False)
-META_PREFIX = 'x-object-meta-'
-
-
-def _get_container(module, cf, container):
- try:
- return cf.get_container(container)
- except pyrax.exc.NoSuchContainer as e:
- module.fail_json(msg=e.message)
-
-
-def _upload_folder(cf, folder, container, ttl=None, headers=None):
- """ Uploads a folder to Cloud Files.
- """
- total_bytes = 0
- for root, dummy, files in os.walk(folder):
- for fname in files:
- full_path = os.path.join(root, fname)
- obj_name = os.path.relpath(full_path, folder)
- obj_size = os.path.getsize(full_path)
- cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
- total_bytes += obj_size
- return total_bytes
-
-
-def upload(module, cf, container, src, dest, meta, expires):
- """ Upload a single object or a folder to Cloud Files. Optionally sets
- metadata, a TTL value (expires), or Content-Disposition and Content-Encoding
- headers.
- """
- if not src:
- module.fail_json(msg='src must be specified when uploading')
-
- c = _get_container(module, cf, container)
- src = os.path.abspath(os.path.expanduser(src))
- is_dir = os.path.isdir(src)
-
- if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
- module.fail_json(msg='src must be a file or a directory')
- if dest and is_dir:
- module.fail_json(msg='dest cannot be set when whole '
- 'directories are uploaded')
-
- cont_obj = None
- total_bytes = 0
- try:
- if dest and not is_dir:
- cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
- elif is_dir:
- total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
- else:
- cont_obj = c.upload_file(src, ttl=expires, headers=meta)
- except Exception as e:
- module.fail_json(msg=e.message)
-
- EXIT_DICT['success'] = True
- EXIT_DICT['container'] = c.name
- EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
- if cont_obj or total_bytes > 0:
- EXIT_DICT['changed'] = True
- if meta:
- EXIT_DICT['meta'] = dict(updated=True)
-
- if cont_obj:
- EXIT_DICT['bytes'] = cont_obj.total_bytes
- EXIT_DICT['etag'] = cont_obj.etag
- else:
- EXIT_DICT['bytes'] = total_bytes
-
- module.exit_json(**EXIT_DICT)
-
-
-def download(module, cf, container, src, dest, structure):
- """ Download objects from Cloud Files to a local path specified by "dest".
- Optionally disable maintaining a directory structure by passing a
- false value to "structure".
- """
- # Looking for an explicit destination
- if not dest:
- module.fail_json(msg='dest is a required argument when '
- 'downloading from Cloud Files')
-
- # Attempt to fetch the container by name
- c = _get_container(module, cf, container)
-
- # Accept a single object name or a comma-separated list of objs
- # If not specified, get the entire container
- if src:
- objs = [obj.strip() for obj in src.split(',')]
- else:
- objs = c.get_object_names()
-
- dest = os.path.abspath(os.path.expanduser(dest))
- is_dir = os.path.isdir(dest)
-
- if not is_dir:
- module.fail_json(msg='dest must be a directory')
-
- try:
- results = [c.download_object(obj, dest, structure=structure) for obj in objs]
- except Exception as e:
- module.fail_json(msg=e.message)
-
- len_results = len(results)
- len_objs = len(objs)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['requested_downloaded'] = results
- if results:
- EXIT_DICT['changed'] = True
- if len_results == len_objs:
- EXIT_DICT['success'] = True
- EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
- else:
- EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
- "downloaded" % (len_results, len_objs)
- module.exit_json(**EXIT_DICT)
-
-
-def delete(module, cf, container, src, dest):
- """ Delete specific objects by providing a single file name or a
- comma-separated list to src OR dest (but not both). Omitting file name(s)
- assumes the entire container is to be deleted.
- """
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
- "have been specified on both src and dest args")
-
- c = _get_container(module, cf, container)
-
- objs = dest or src
- if objs:
- objs = [obj.strip() for obj in objs.split(',')]
- else:
- objs = c.get_object_names()
-
- num_objs = len(objs)
-
- try:
- results = [c.delete_object(obj) for obj in objs]
- except Exception as e:
- module.fail_json(msg=e.message)
-
- num_deleted = results.count(True)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['deleted'] = num_deleted
- EXIT_DICT['requested_deleted'] = objs
-
- if num_deleted:
- EXIT_DICT['changed'] = True
-
- if num_objs == num_deleted:
- EXIT_DICT['success'] = True
- EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
- else:
- EXIT_DICT['msg'] = ("Error: only %s of %s objects "
- "deleted" % (num_deleted, num_objs))
- module.exit_json(**EXIT_DICT)
-
-
-def get_meta(module, cf, container, src, dest):
- """ Get metadata for a single file, comma-separated list, or entire
- container
- """
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; objects to get metadata for "
- "have been specified on both src and dest args")
-
- c = _get_container(module, cf, container)
-
- objs = dest or src
- if objs:
- objs = map(str.strip, objs.split(','))
- else:
- objs = c.get_object_names()
-
- try:
- results = dict()
- for obj in objs:
- meta = c.get_object(obj).get_metadata()
- results[obj] = dict((k.split(META_PREFIX)[-1], v) for k, v in meta.items())
- except Exception as e:
- module.fail_json(msg=e.message)
-
- EXIT_DICT['container'] = c.name
- if results:
- EXIT_DICT['meta_results'] = results
- EXIT_DICT['success'] = True
- module.exit_json(**EXIT_DICT)
-
-
-def put_meta(module, cf, container, src, dest, meta, clear_meta):
- """ Set metadata on a container, single file, or comma-separated list.
- Passing a true value to clear_meta clears the metadata stored in Cloud
- Files before setting the new metadata to the value of "meta".
- """
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; files to set meta"
- " have been specified on both src and dest args")
- objs = dest or src
- objs = map(str.strip, objs.split(','))
-
- c = _get_container(module, cf, container)
-
- try:
- results = [c.get_object(obj).set_metadata(meta, clear=clear_meta) for obj in objs]
- except Exception as e:
- module.fail_json(msg=e.message)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['success'] = True
- if results:
- EXIT_DICT['changed'] = True
- EXIT_DICT['num_changed'] = len(results)
- module.exit_json(**EXIT_DICT)
-
-
-def delete_meta(module, cf, container, src, dest, meta):
- """ Removes metadata keys and values specified in meta, if any. Deletes on
- all objects specified by src or dest (but not both), if any; otherwise it
- deletes keys on all objects in the container
- """
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
- "deleted have been specified on both src and dest"
- " args")
- objs = dest or src
- objs = map(str.strip, objs.split(','))
-
- c = _get_container(module, cf, container)
-
- try:
- results = []
- for obj in objs:
- o = c.get_object(obj)
- for k in (meta or o.get_metadata()):
- o.remove_metadata_key(k)
- results.append(k)
- except Exception as e:
- module.fail_json(msg=e.message)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['success'] = True
- if results:
- EXIT_DICT['changed'] = True
- EXIT_DICT['num_deleted'] = len(results)
- module.exit_json(**EXIT_DICT)
-
-
-def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
- structure, expires):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
-
- if cf is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if typ == "file":
- if method == 'get':
- download(module, cf, container, src, dest, structure)
-
- if method == 'put':
- upload(module, cf, container, src, dest, meta, expires)
-
- if method == 'delete':
- delete(module, cf, container, src, dest)
-
- else:
- if method == 'get':
- get_meta(module, cf, container, src, dest)
-
- if method == 'put':
- put_meta(module, cf, container, src, dest, meta, clear_meta)
-
- if method == 'delete':
- delete_meta(module, cf, container, src, dest, meta)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- container=dict(required=True),
- src=dict(),
- dest=dict(),
- method=dict(default='get', choices=['put', 'get', 'delete']),
- type=dict(default='file', choices=['file', 'meta']),
- meta=dict(type='dict', default=dict()),
- clear_meta=dict(default=False, type='bool'),
- structure=dict(default=True, type='bool'),
- expires=dict(type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- container = module.params.get('container')
- src = module.params.get('src')
- dest = module.params.get('dest')
- method = module.params.get('method')
- typ = module.params.get('type')
- meta = module.params.get('meta')
- clear_meta = module.params.get('clear_meta')
- structure = module.params.get('structure')
- expires = module.params.get('expires')
-
- if clear_meta and typ != 'meta':
- module.fail_json(msg='clear_meta can only be used when setting metadata')
-
- setup_rax_module(module, pyrax)
- cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
-
-
-if __name__ == '__main__':
- main()
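
download() and delete() in the removed rax_files_objects module above split a comma-separated src/dest value into individual object names and later call len() on the result, which is why the parsed value needs to be a real list on Python 3 rather than a map iterator. A small standalone sketch of that parsing step (the function name and sample values are illustrative):

# Illustrative sketch only: materializing the comma-separated object list as a
# real list keeps the later len(objs) bookkeeping working on Python 3, where a
# bare map object has no len().
def parse_object_list(spec, fallback=()):
    """Split 'file1, file2' into ['file1', 'file2'], or fall back to all objects."""
    if spec:
        return [name.strip() for name in spec.split(',')]
    return list(fallback)

if __name__ == '__main__':
    objs = parse_object_list('file1, file2 ,file3')
    print(objs, len(objs))  # ['file1', 'file2', 'file3'] 3
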
diff --git a/ansible_collections/community/general/plugins/modules/rax_identity.py b/ansible_collections/community/general/plugins/modules/rax_identity.py
deleted file mode 100644
index b2eb15627..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_identity.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_identity
-short_description: Load Rackspace Cloud Identity
-description:
- - Verifies Rackspace Cloud credentials and returns identity information
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present']
- default: present
- required: false
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Load Rackspace Cloud Identity
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Load Identity
- local_action:
- module: rax_identity
- credentials: ~/.raxpub
- region: DFW
- register: rackspace_identity
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
- setup_rax_module)
-
-
-def cloud_identity(module, state, identity):
- instance = dict(
- authenticated=identity.authenticated,
- credentials=identity._creds_file
- )
- changed = False
-
- instance.update(rax_to_dict(identity))
- instance['services'] = list(instance.get('services', {}).keys())
-
- if state == 'present':
- if not identity.authenticated:
- module.fail_json(msg='Credentials could not be verified!')
-
- module.exit_json(changed=changed, identity=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- if not pyrax.identity:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- cloud_identity(module, state, pyrax.identity)
-
-
-if __name__ == '__main__':
- main()
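
cloud_identity() above converts the identity's service catalog keys to a plain list before returning them, since dict views are not JSON serializable by the standard json module. A small sketch of that conversion, using a plain dict in place of the pyrax identity object (names and sample data are illustrative):

# Illustrative sketch only: dict.keys() returns a view that json.dumps() cannot
# serialize, so the facts convert it to a plain list, as instance['services']
# does above.
import json

def summarize_services(identity_facts):
    """Return a JSON-friendly list of service names from an identity facts dict."""
    return list(identity_facts.get('services', {}).keys())

if __name__ == '__main__':
    fake_identity = {'services': {'cloudServersOpenStack': {}, 'cloudFiles': {}}}
    print(json.dumps(summarize_services(fake_identity)))
    # ["cloudServersOpenStack", "cloudFiles"]
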
diff --git a/ansible_collections/community/general/plugins/modules/rax_keypair.py b/ansible_collections/community/general/plugins/modules/rax_keypair.py
deleted file mode 100644
index d7d7a2cc3..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_keypair.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_keypair
-short_description: Create a keypair for use with Rackspace Cloud Servers
-description:
- - Create a keypair for use with Rackspace Cloud Servers
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- name:
- type: str
- description:
- - Name of keypair
- required: true
- public_key:
- type: str
- description:
- - Public Key string to upload. Can be a file path or string
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author: "Matt Martz (@sivel)"
-notes:
- - Keypairs cannot be manipulated, only created and deleted. To "update" a
- keypair you must first delete and then recreate.
- - The ability to specify a file path for the public key was added in 1.7
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Create a keypair
- hosts: localhost
- gather_facts: false
- tasks:
- - name: Keypair request
- local_action:
- module: rax_keypair
- credentials: ~/.raxpub
- name: my_keypair
- region: DFW
- register: keypair
- - name: Create local public key
- local_action:
- module: copy
- content: "{{ keypair.keypair.public_key }}"
- dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- - name: Create local private key
- local_action:
- module: copy
- content: "{{ keypair.keypair.private_key }}"
- dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
-
-- name: Create a keypair
- hosts: localhost
- gather_facts: false
- tasks:
- - name: Keypair request
- local_action:
- module: rax_keypair
- credentials: ~/.raxpub
- name: my_keypair
- public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
- region: DFW
- register: keypair
-'''
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_keypair(module, name, public_key, state):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- keypair = {}
-
- if state == 'present':
- if public_key and os.path.isfile(public_key):
- try:
- with open(public_key) as f:
- public_key = f.read()
- except Exception:
- module.fail_json(msg='Failed to load %s' % public_key)
-
- try:
- keypair = cs.keypairs.find(name=name)
- except cs.exceptions.NotFound:
- try:
- keypair = cs.keypairs.create(name, public_key)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- keypair = cs.keypairs.find(name=name)
- except Exception:
- pass
-
- if keypair:
- try:
- keypair.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- public_key=dict(),
- state=dict(default='present', choices=['absent', 'present']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- public_key = module.params.get('public_key')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- rax_keypair(module, name, public_key, state)
-
-
-if __name__ == '__main__':
- main()
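
The public_key option of the removed rax_keypair module accepts either a literal public key string or a path to a key file, and rax_keypair() above reads the file when the value points at one. A small sketch of that resolution step in isolation (the helper name is illustrative):

# Illustrative sketch only: mirrors how the public_key option accepts either
# key material or a path to a file containing it.
import os

def resolve_public_key(public_key):
    """Return the key material, reading it from disk when a file path is given."""
    if public_key and os.path.isfile(public_key):
        with open(public_key) as f:
            return f.read()
    return public_key

if __name__ == '__main__':
    print(resolve_public_key('ssh-rsa AAAA... user@host'))  # literal key, returned as-is
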
diff --git a/ansible_collections/community/general/plugins/modules/rax_meta.py b/ansible_collections/community/general/plugins/modules/rax_meta.py
deleted file mode 100644
index 7b52e906f..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_meta.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_meta
-short_description: Manipulate metadata for Rackspace Cloud Servers
-description:
- - Manipulate metadata for Rackspace Cloud Servers
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- address:
- type: str
- description:
- - Server IP address to modify metadata for; this matches any IP address assigned to
- the server.
- id:
- type: str
- description:
- - Server ID to modify metadata for
- name:
- type: str
- description:
- - Server name to modify metadata for
- meta:
- type: dict
- default: {}
- description:
- - A hash of metadata to associate with the instance
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Set metadata for a server
- hosts: all
- gather_facts: false
- tasks:
- - name: Set metadata
- local_action:
- module: rax_meta
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
- meta:
- group: primary_group
- groups:
- - group_two
- - group_three
- app: my_app
-
- - name: Clear metadata
- local_action:
- module: rax_meta
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
-'''
-
-import json
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-from ansible.module_utils.six import string_types
-
-
-def rax_meta(module, address, name, server_id, meta):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- search_opts = {}
- if name:
- search_opts = dict(name='^%s$' % name)
- try:
- servers = cs.servers.list(search_opts=search_opts)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif address:
- servers = []
- try:
- for server in cs.servers.list():
- for addresses in server.networks.values():
- if address in addresses:
- servers.append(server)
- break
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif server_id:
- servers = []
- try:
- servers.append(cs.servers.get(server_id))
- except Exception:
- pass
-
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers found matching provided '
- 'search parameters')
- elif not servers:
- module.fail_json(msg='Failed to find a server matching provided '
- 'search parameters')
-
- # Normalize and ensure all metadata values are strings
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- server = servers[0]
- if server.metadata == meta:
- changed = False
- else:
- changed = True
- removed = set(server.metadata.keys()).difference(meta.keys())
- cs.servers.delete_meta(server, list(removed))
- cs.servers.set_meta(server, meta)
- server.get()
-
- module.exit_json(changed=changed, meta=server.metadata)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- id=dict(),
- name=dict(),
- meta=dict(type='dict', default=dict()),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[['address', 'id', 'name']],
- required_one_of=[['address', 'id', 'name']],
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params.get('address')
- server_id = module.params.get('id')
- name = module.params.get('name')
- meta = module.params.get('meta')
-
- setup_rax_module(module, pyrax)
-
- rax_meta(module, address, name, server_id, meta)
-
-
-if __name__ == '__main__':
- main()
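
Cloud Servers metadata values must be strings, so rax_meta() above normalizes lists to comma-separated strings, dicts to JSON, and anything else to str before comparing against the server's current metadata. A standalone sketch of that normalization (the helper name and sample values are illustrative):

# Illustrative sketch only: the same normalization rax_meta() applies before it
# decides whether the server's metadata actually changed.
import json

def normalize_meta(meta):
    normalized = {}
    for key, value in meta.items():
        if isinstance(value, list):
            normalized[key] = ','.join('%s' % item for item in value)
        elif isinstance(value, dict):
            normalized[key] = json.dumps(value)
        elif not isinstance(value, str):
            normalized[key] = '%s' % value
        else:
            normalized[key] = value
    return normalized

if __name__ == '__main__':
    print(normalize_meta({'groups': ['group_two', 'group_three'], 'build': 7}))
    # {'groups': 'group_two,group_three', 'build': '7'}
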
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
deleted file mode 100644
index b66611a90..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_alarm
-short_description: Create or delete a Rackspace Cloud Monitoring alarm
-description:
-- Create or delete a Rackspace Cloud Monitoring alarm that associates an
- existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
- criteria that specify what conditions will trigger which levels of
- notifications. Rackspace monitoring module flow | rax_mon_entity ->
- rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
- *rax_mon_alarm*
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Ensure that the alarm with this O(label) exists or does not exist.
- choices: [ "present", "absent" ]
- required: false
- default: present
- label:
- type: str
- description:
- - Friendly name for this alarm, used to achieve idempotence. Must be a String
- between 1 and 255 characters long.
- required: true
- entity_id:
- type: str
- description:
- - ID of the entity this alarm is attached to. May be acquired by registering
- the value of a rax_mon_entity task.
- required: true
- check_id:
- type: str
- description:
- - ID of the check that should be alerted on. May be acquired by registering
- the value of a rax_mon_check task.
- required: true
- notification_plan_id:
- type: str
- description:
- - ID of the notification plan to trigger if this alarm fires. May be acquired
- by registering the value of a rax_mon_notification_plan task.
- required: true
- criteria:
- type: str
- description:
- - Alarm DSL that describes alerting conditions and their output states. Must
- be between 1 and 16384 characters long. See
- http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
- for a reference on the alerting language.
- disabled:
- description:
- - If V(true), create this alarm, but leave it in an inactive state.
- type: bool
- default: false
- metadata:
- type: dict
- description:
- - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
- keys and values between 1 and 255 characters long.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Alarm example
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Ensure that a specific alarm exists.
- community.general.rax_mon_alarm:
- credentials: ~/.rax_pub
- state: present
- label: uhoh
- entity_id: "{{ the_entity['entity']['id'] }}"
- check_id: "{{ the_check['check']['id'] }}"
- notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
- criteria: >
- if (rate(metric['average']) > 10) {
- return new AlarmStatus(WARNING);
- }
- return new AlarmStatus(OK);
- register: the_alarm
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
- disabled, metadata):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- if criteria and (len(criteria) < 1 or len(criteria) > 16384):
- module.fail_json(msg='criteria must be between 1 and 16384 characters long')
-
- # Coerce attributes.
-
- changed = False
- alarm = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
-
- if existing:
- alarm = existing[0]
-
- if state == 'present':
- should_create = False
- should_update = False
- should_delete = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing alarms have the label %s.' %
- (len(existing), label))
-
- if alarm:
- if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
- should_delete = should_create = True
-
- should_update = (disabled and disabled != alarm.disabled) or \
- (metadata and metadata != alarm.metadata) or \
- (criteria and criteria != alarm.criteria)
-
- if should_update and not should_delete:
- cm.update_alarm(entity=entity_id, alarm=alarm,
- criteria=criteria, disabled=disabled,
- label=label, metadata=metadata)
- changed = True
-
- if should_delete:
- alarm.delete()
- changed = True
- else:
- should_create = True
-
- if should_create:
- alarm = cm.create_alarm(entity=entity_id, check=check_id,
- notification_plan=notification_plan_id,
- criteria=criteria, disabled=disabled, label=label,
- metadata=metadata)
- changed = True
- else:
- for a in existing:
- a.delete()
- changed = True
-
- if alarm:
- alarm_dict = {
- "id": alarm.id,
- "label": alarm.label,
- "check_id": alarm.check_id,
- "notification_plan_id": alarm.notification_plan_id,
- "criteria": alarm.criteria,
- "disabled": alarm.disabled,
- "metadata": alarm.metadata
- }
- module.exit_json(changed=changed, alarm=alarm_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- entity_id=dict(required=True),
- check_id=dict(required=True),
- notification_plan_id=dict(required=True),
- criteria=dict(),
- disabled=dict(type='bool', default=False),
- metadata=dict(type='dict')
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
- label = module.params.get('label')
- entity_id = module.params.get('entity_id')
- check_id = module.params.get('check_id')
- notification_plan_id = module.params.get('notification_plan_id')
- criteria = module.params.get('criteria')
- disabled = module.boolean(module.params.get('disabled'))
- metadata = module.params.get('metadata')
-
- setup_rax_module(module, pyrax)
-
- alarm(module, state, label, entity_id, check_id, notification_plan_id,
- criteria, disabled, metadata)
-
-
-if __name__ == '__main__':
- main()
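
alarm() above validates the label and criteria lengths before talking to the monitoring API; the criteria check has to be grouped so an omitted criteria (None) is skipped rather than passed to len(). A pyrax-free sketch of that validation (the function name is illustrative):

# Illustrative sketch only: the length validation used by alarm(), with the
# criteria check parenthesized so that criteria=None short-circuits cleanly.
def validate_alarm_args(label, criteria=None):
    errors = []
    if len(label) < 1 or len(label) > 255:
        errors.append('label must be between 1 and 255 characters long')
    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
        errors.append('criteria must be between 1 and 16384 characters long')
    return errors

if __name__ == '__main__':
    print(validate_alarm_args('uhoh'))                    # []
    print(validate_alarm_args('', criteria='x' * 20000))  # two errors
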
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
deleted file mode 100644
index 253c26dcf..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_mon_check.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_check
-short_description: Create or delete a Rackspace Cloud Monitoring check for an
- existing entity.
-description:
-- Create or delete a Rackspace Cloud Monitoring check associated with an
- existing rax_mon_entity. A check is a specific test or measurement that is
- performed, possibly from different monitoring zones, on the systems you
- monitor. Rackspace monitoring module flow | rax_mon_entity ->
- *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
- rax_mon_alarm
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Ensure that a check with this O(label) exists or does not exist.
- choices: ["present", "absent"]
- default: present
- entity_id:
- type: str
- description:
- - ID of the rax_mon_entity to target with this check.
- required: true
- label:
- type: str
- description:
- - Defines a label for this check, between 1 and 64 characters long.
- required: true
- check_type:
- type: str
- description:
- - The type of check to create. C(remote.) checks may be created on any
- rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
- that have a non-null C(agent_id).
- - |
- Choices for this option are:
- - V(remote.dns)
- - V(remote.ftp-banner)
- - V(remote.http)
- - V(remote.imap-banner)
- - V(remote.mssql-banner)
- - V(remote.mysql-banner)
- - V(remote.ping)
- - V(remote.pop3-banner)
- - V(remote.postgresql-banner)
- - V(remote.smtp-banner)
- - V(remote.smtp)
- - V(remote.ssh)
- - V(remote.tcp)
- - V(remote.telnet-banner)
- - V(agent.filesystem)
- - V(agent.memory)
- - V(agent.load_average)
- - V(agent.cpu)
- - V(agent.disk)
- - V(agent.network)
- - V(agent.plugin)
- required: true
- monitoring_zones_poll:
- type: str
- description:
- - Comma-separated list of the names of the monitoring zones the check should
- run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
- mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
- target_hostname:
- type: str
- description:
- - One of O(target_hostname) and O(target_alias) is required for remote.* checks,
- but prohibited for agent.* checks. The hostname this check should target.
- Must be a valid IPv4 address, IPv6 address, or FQDN.
- target_alias:
- type: str
- description:
- - One of O(target_alias) and O(target_hostname) is required for remote.* checks,
- but prohibited for agent.* checks. Use the corresponding key in the entity's
- C(ip_addresses) hash to resolve an IP address to target.
- details:
- type: dict
- default: {}
- description:
- - Additional details specific to the check type. Must be a hash of strings
- between 1 and 255 characters long, or an array or object containing 0 to
- 256 items.
- disabled:
- description:
- - If V(true), ensure the check is created, but don't actually use it yet.
- type: bool
- default: false
- metadata:
- type: dict
- default: {}
- description:
- - Hash of arbitrary key-value pairs to accompany this check if it fires.
- Keys and values must be strings between 1 and 255 characters long.
- period:
- type: int
- description:
- - The number of seconds between each time the check is performed. Must be
- greater than the minimum period set on your account.
- timeout:
- type: int
- description:
- - The number of seconds this check will wait when attempting to collect
- results. Must be less than the period.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Create a monitoring check
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Associate a check with an existing entity.
- community.general.rax_mon_check:
- credentials: ~/.rax_pub
- state: present
- entity_id: "{{ the_entity['entity']['id'] }}"
- label: the_check
- check_type: remote.ping
- monitoring_zones_poll: mziad,mzord,mzdfw
- details:
- count: 10
- meta:
- hurf: durf
- register: the_check
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_check(module, state, entity_id, label, check_type,
- monitoring_zones_poll, target_hostname, target_alias, details,
- disabled, metadata, period, timeout):
-
- # Coerce attributes.
-
- if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
- monitoring_zones_poll = [monitoring_zones_poll]
-
- if period:
- period = int(period)
-
- if timeout:
- timeout = int(timeout)
-
- changed = False
- check = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- entity = cm.get_entity(entity_id)
- if not entity:
- module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
- ' a valid entity id.' % entity_id)
-
- existing = [e for e in entity.list_checks() if e.label == label]
-
- if existing:
- check = existing[0]
-
- if state == 'present':
- if len(existing) > 1:
- module.fail_json(msg='%s existing checks have a label of %s.' %
- (len(existing), label))
-
- should_delete = False
- should_create = False
- should_update = False
-
- if check:
- # Details may include keys set to default values that are not
- # included in the initial creation.
- #
- # Only force a recreation of the check if one of the *specified*
- # keys is missing or has a different value.
- if details:
- for (key, value) in details.items():
- if key not in check.details:
- should_delete = should_create = True
- elif value != check.details[key]:
- should_delete = should_create = True
-
- should_update = label != check.label or \
- (target_hostname and target_hostname != check.target_hostname) or \
- (target_alias and target_alias != check.target_alias) or \
- (disabled != check.disabled) or \
- (metadata and metadata != check.metadata) or \
- (period and period != check.period) or \
- (timeout and timeout != check.timeout) or \
- (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
-
- if should_update and not should_delete:
- check.update(label=label,
- disabled=disabled,
- metadata=metadata,
- monitoring_zones_poll=monitoring_zones_poll,
- timeout=timeout,
- period=period,
- target_alias=target_alias,
- target_hostname=target_hostname)
- changed = True
- else:
- # The check doesn't exist yet.
- should_create = True
-
- if should_delete:
- check.delete()
-
- if should_create:
- check = cm.create_check(entity,
- label=label,
- check_type=check_type,
- target_hostname=target_hostname,
- target_alias=target_alias,
- monitoring_zones_poll=monitoring_zones_poll,
- details=details,
- disabled=disabled,
- metadata=metadata,
- period=period,
- timeout=timeout)
- changed = True
- elif state == 'absent':
- if check:
- check.delete()
- changed = True
- else:
- module.fail_json(msg='state must be either present or absent.')
-
- if check:
- check_dict = {
- "id": check.id,
- "label": check.label,
- "type": check.type,
- "target_hostname": check.target_hostname,
- "target_alias": check.target_alias,
- "monitoring_zones_poll": check.monitoring_zones_poll,
- "details": check.details,
- "disabled": check.disabled,
- "metadata": check.metadata,
- "period": check.period,
- "timeout": check.timeout
- }
- module.exit_json(changed=changed, check=check_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- entity_id=dict(required=True),
- label=dict(required=True),
- check_type=dict(required=True),
- monitoring_zones_poll=dict(),
- target_hostname=dict(),
- target_alias=dict(),
- details=dict(type='dict', default={}),
- disabled=dict(type='bool', default=False),
- metadata=dict(type='dict', default={}),
- period=dict(type='int'),
- timeout=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- entity_id = module.params.get('entity_id')
- label = module.params.get('label')
- check_type = module.params.get('check_type')
- monitoring_zones_poll = module.params.get('monitoring_zones_poll')
- target_hostname = module.params.get('target_hostname')
- target_alias = module.params.get('target_alias')
- details = module.params.get('details')
- disabled = module.boolean(module.params.get('disabled'))
- metadata = module.params.get('metadata')
- period = module.params.get('period')
- timeout = module.params.get('timeout')
-
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- cloud_check(module, state, entity_id, label, check_type,
- monitoring_zones_poll, target_hostname, target_alias, details,
- disabled, metadata, period, timeout)
-
-
-if __name__ == '__main__':
- main()
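
cloud_check() above only forces a delete-and-recreate when one of the details keys the task actually specifies is missing or differs on the stored check; keys the API filled in with defaults are ignored. A reduced sketch of that comparison rule, using plain dicts in place of check objects (names and sample data are illustrative):

# Illustrative sketch only: the details-comparison rule from cloud_check().
# Only keys the task specifies can trigger a recreate; server-side defaults
# that were never requested are left out of the comparison.
def needs_recreate(requested_details, existing_details):
    for key, value in (requested_details or {}).items():
        if key not in existing_details or existing_details[key] != value:
            return True
    return False

if __name__ == '__main__':
    existing = {'count': 10, 'timeout': 9}          # 'timeout' added server-side
    print(needs_recreate({'count': 10}, existing))  # False: nothing requested changed
    print(needs_recreate({'count': 5}, existing))   # True: requested value differs
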
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
deleted file mode 100644
index fbad9f98f..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_entity
-short_description: Create or delete a Rackspace Cloud Monitoring entity
-description:
-- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
- to monitor. Entities associate checks and alarms with a target system and
- provide a convenient, centralized place to store IP addresses. Rackspace
- monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
- rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- label:
- type: str
- description:
- - Defines a name for this entity. Must be a non-empty string between 1 and
- 255 characters long.
- required: true
- state:
- type: str
- description:
- - Ensure that an entity with this O(label) exists or does not exist.
- choices: ["present", "absent"]
- default: present
- agent_id:
- type: str
- description:
- - Rackspace monitoring agent on the target device to which this entity is
- bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
- named_ip_addresses:
- type: dict
- default: {}
- description:
- - Hash of IP addresses that may be referenced by name by rax_mon_checks
- added to this entity. Must be a dictionary with keys that are names
- between 1 and 64 characters long, and values that are valid IPv4 or IPv6
- addresses.
- metadata:
- type: dict
- default: {}
- description:
- - Hash of arbitrary C(name), C(value) pairs that are passed to associated
- rax_mon_alarms. Names and values must all be between 1 and 255 characters
- long.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Entity example
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Ensure an entity exists
- community.general.rax_mon_entity:
- credentials: ~/.rax_pub
- state: present
- label: my_entity
- named_ip_addresses:
- web_box: 192.0.2.4
- db_box: 192.0.2.5
- meta:
- hurf: durf
- register: the_entity
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
- metadata):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for entity in cm.list_entities():
- if label == entity.label:
- existing.append(entity)
-
- entity = None
-
- if existing:
- entity = existing[0]
-
- if state == 'present':
- should_update = False
- should_delete = False
- should_create = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing entities have the label %s.' %
- (len(existing), label))
-
- if entity:
- if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
- should_delete = should_create = True
-
- # Change an existing Entity, unless there's nothing to do.
- should_update = (agent_id and agent_id != entity.agent_id) or \
- (metadata and metadata != entity.metadata)
-
- if should_update and not should_delete:
- entity.update(agent_id, metadata)
- changed = True
-
- if should_delete:
- entity.delete()
- else:
- should_create = True
-
- if should_create:
- # Create a new Entity.
- entity = cm.create_entity(label=label, agent=agent_id,
- ip_addresses=named_ip_addresses,
- metadata=metadata)
- changed = True
- else:
- # Delete the existing Entities.
- for e in existing:
- e.delete()
- changed = True
-
- if entity:
- entity_dict = {
- "id": entity.id,
- "name": entity.name,
- "agent_id": entity.agent_id,
- }
- module.exit_json(changed=changed, entity=entity_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- agent_id=dict(),
- named_ip_addresses=dict(type='dict', default={}),
- metadata=dict(type='dict', default={})
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- agent_id = module.params.get('agent_id')
- named_ip_addresses = module.params.get('named_ip_addresses')
- metadata = module.params.get('metadata')
-
- setup_rax_module(module, pyrax)
-
- cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
-
-
-if __name__ == '__main__':
- main()
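
cloud_monitoring() above achieves idempotence by matching existing entities on their label and then deciding whether to create, update, or delete. A reduced, pyrax-free sketch of that decision (the function name and return values are illustrative):

# Illustrative sketch only: the label-based idempotence decision from
# cloud_monitoring(), reduced to plain data.
def plan_entity_action(existing_labels, label, state):
    matches = [existing for existing in existing_labels if existing == label]
    if len(matches) > 1:
        return 'fail: duplicate labels'
    if state == 'present':
        return 'update-or-noop' if matches else 'create'
    return 'delete' if matches else 'noop'

if __name__ == '__main__':
    print(plan_entity_action(['web', 'db'], 'cache', 'present'))  # create
    print(plan_entity_action(['web', 'db'], 'db', 'absent'))      # delete
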
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
deleted file mode 100644
index 7539f2a37..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_notification
-short_description: Create or delete a Rackspace Cloud Monitoring notification
-description:
-- Create or delete a Rackspace Cloud Monitoring notification that specifies a
- channel that can be used to communicate alarms, such as email, webhooks, or
- PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
- *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Ensure that the notification with this O(label) exists or does not exist.
- choices: ['present', 'absent']
- default: present
- label:
- type: str
- description:
- - Defines a friendly name for this notification. String between 1 and 255
- characters long.
- required: true
- notification_type:
- type: str
- description:
- - A supported notification type.
- choices: ["webhook", "email", "pagerduty"]
- required: true
- details:
- type: dict
- description:
- - Dictionary of key-value pairs used to initialize the notification.
- Required keys and meanings vary with notification type. See
- http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notification-types-crud.html
- for details.
- required: true
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Monitoring notification example
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Email me when something goes wrong.
-      community.general.rax_mon_notification:
-        credentials: ~/.rax_pub
-        label: omg
-        notification_type: email
- details:
- address: me@mailhost.com
- register: the_notification
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def notification(module, state, label, notification_type, details):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
- notification = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for n in cm.list_notifications():
- if n.label == label:
- existing.append(n)
-
- if existing:
- notification = existing[0]
-
- if state == 'present':
- should_update = False
- should_delete = False
- should_create = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing notifications are labelled %s.' %
- (len(existing), label))
-
- if notification:
- should_delete = (notification_type != notification.type)
-
- should_update = (details != notification.details)
-
- if should_update and not should_delete:
-                notification.update(details=details)
- changed = True
-
- if should_delete:
- notification.delete()
- else:
- should_create = True
-
- if should_create:
- notification = cm.create_notification(notification_type,
- label=label, details=details)
- changed = True
- else:
- for n in existing:
- n.delete()
- changed = True
-
- if notification:
- notification_dict = {
- "id": notification.id,
- "type": notification.type,
- "label": notification.label,
- "details": notification.details
- }
- module.exit_json(changed=changed, notification=notification_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
- details=dict(required=True, type='dict')
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- notification_type = module.params.get('notification_type')
- details = module.params.get('details')
-
- setup_rax_module(module, pyrax)
-
- notification(module, state, label, notification_type, details)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
deleted file mode 100644
index 31647304b..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_notification_plan
-short_description: Create or delete a Rackspace Cloud Monitoring notification plan
-description:
-- Create or delete a Rackspace Cloud Monitoring notification plan by
- associating existing rax_mon_notifications with severity levels. Rackspace
- monitoring module flow | rax_mon_entity -> rax_mon_check ->
- rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Ensure that the notification plan with this O(label) exists or does not
- exist.
- choices: ['present', 'absent']
- default: present
- label:
- type: str
- description:
- - Defines a friendly name for this notification plan. String between 1 and
- 255 characters long.
- required: true
- critical_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is CRITICAL. Must be an
- array of valid rax_mon_notification ids.
- warning_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is WARNING. Must be an array
- of valid rax_mon_notification ids.
- ok_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is OK. Must be an array of
- valid rax_mon_notification ids.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Example notification plan
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Establish who gets called when.
- community.general.rax_mon_notification_plan:
- credentials: ~/.rax_pub
- state: present
- label: defcon1
- critical_state:
- - "{{ everyone['notification']['id'] }}"
- warning_state:
- - "{{ opsfloor['notification']['id'] }}"
- register: defcon1
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def notification_plan(module, state, label, critical_state, warning_state, ok_state):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
- notification_plan = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for n in cm.list_notification_plans():
- if n.label == label:
- existing.append(n)
-
- if existing:
- notification_plan = existing[0]
-
- if state == 'present':
- should_create = False
- should_delete = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s notification plans are labelled %s.' %
- (len(existing), label))
-
- if notification_plan:
- should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
- (warning_state and warning_state != notification_plan.warning_state) or \
- (ok_state and ok_state != notification_plan.ok_state)
-
- if should_delete:
- notification_plan.delete()
- should_create = True
- else:
- should_create = True
-
- if should_create:
- notification_plan = cm.create_notification_plan(label=label,
- critical_state=critical_state,
- warning_state=warning_state,
- ok_state=ok_state)
- changed = True
- else:
- for np in existing:
- np.delete()
- changed = True
-
- if notification_plan:
- notification_plan_dict = {
- "id": notification_plan.id,
- "critical_state": notification_plan.critical_state,
- "warning_state": notification_plan.warning_state,
- "ok_state": notification_plan.ok_state,
- "metadata": notification_plan.metadata
- }
- module.exit_json(changed=changed, notification_plan=notification_plan_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- critical_state=dict(type='list', elements='str'),
- warning_state=dict(type='list', elements='str'),
- ok_state=dict(type='list', elements='str'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- critical_state = module.params.get('critical_state')
- warning_state = module.params.get('warning_state')
- ok_state = module.params.get('ok_state')
-
- setup_rax_module(module, pyrax)
-
- notification_plan(module, state, label, critical_state, warning_state, ok_state)
-
-
-if __name__ == '__main__':
- main()
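The module flow described in the documentation above (rax_mon_notification feeding rax_mon_notification_plan) can be illustrated with a short sketch; the credential path, labels, and email address below are placeholder values, not taken from the modules themselves:

- name: Wire an email notification into a notification plan (sketch)
  gather_facts: false
  hosts: local
  connection: local
  tasks:
    - name: Create the email notification
      community.general.rax_mon_notification:
        credentials: ~/.rax_pub
        label: oncall-email
        notification_type: email
        details:
          address: oncall@example.com
      register: oncall

    - name: Use it for both critical and warning states
      community.general.rax_mon_notification_plan:
        credentials: ~/.rax_pub
        state: present
        label: default-plan
        critical_state:
          - "{{ oncall.notification.id }}"
        warning_state:
          - "{{ oncall.notification.id }}"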
diff --git a/ansible_collections/community/general/plugins/modules/rax_network.py b/ansible_collections/community/general/plugins/modules/rax_network.py
deleted file mode 100644
index 22f148366..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_network.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_network
-short_description: Create / delete an isolated network in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud isolated network.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- label:
- type: str
- description:
- - Label (name) to give the network
- required: true
- cidr:
- type: str
- description:
- - cidr of the network being created
-author:
- - "Christopher H. Laco (@claco)"
- - "Jesse Keating (@omgjlk)"
-extends_documentation_fragment:
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build an Isolated Network
-  gather_facts: false
-  hosts: local
-  connection: local
-
- tasks:
- - name: Network create request
- local_action:
- module: rax_network
- credentials: ~/.raxpub
- label: my-net
- cidr: 192.168.3.0/24
- state: present
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_network(module, state, label, cidr):
- changed = False
- network = None
- networks = []
-
- if not pyrax.cloud_networks:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not cidr:
- module.fail_json(msg='missing required arguments: cidr')
-
- try:
- network = pyrax.cloud_networks.find_network_by_label(label)
- except pyrax.exceptions.NetworkNotFound:
- try:
- network = pyrax.cloud_networks.create(label, cidr=cidr)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- network = pyrax.cloud_networks.find_network_by_label(label)
- network.delete()
- changed = True
- except pyrax.exceptions.NetworkNotFound:
- pass
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- if network:
- instance = dict(id=network.id,
- label=network.label,
- cidr=network.cidr)
- networks.append(instance)
-
- module.exit_json(changed=changed, networks=networks)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present',
- choices=['present', 'absent']),
- label=dict(required=True),
- cidr=dict()
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
- label = module.params.get('label')
- cidr = module.params.get('cidr')
-
- setup_rax_module(module, pyrax)
-
- cloud_network(module, state, label, cidr)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_queue.py b/ansible_collections/community/general/plugins/modules/rax_queue.py
deleted file mode 100644
index 00f730b27..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_queue.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_queue
-short_description: Create / delete a queue in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud queue.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- name:
- type: str
- description:
- - Name to give the queue
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
-- name: Build a Queue
- gather_facts: false
- hosts: local
- connection: local
- tasks:
- - name: Queue create request
- local_action:
- module: rax_queue
- credentials: ~/.raxpub
- name: my-queue
- region: DFW
- state: present
- register: my_queue
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_queue(module, state, name):
-    for arg, value in (('state', state), ('name', name)):
-        if not value:
-            module.fail_json(msg='%s is required for rax_queue' % arg)
-
- changed = False
- queues = []
- instance = {}
-
- cq = pyrax.queues
- if not cq:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- for queue in cq.list():
- if name != queue.name:
- continue
-
- queues.append(queue)
-
- if len(queues) > 1:
- module.fail_json(msg='Multiple Queues were matched by name')
-
- if state == 'present':
- if not queues:
- try:
- queue = cq.create(name)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- queue = queues[0]
-
- instance = dict(name=queue.name)
- result = dict(changed=changed, queue=instance)
- module.exit_json(**result)
-
- elif state == 'absent':
- if queues:
- queue = queues[0]
- try:
- queue.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, queue=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- cloud_queue(module, state, name)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
deleted file mode 100644
index f4bb79025..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
+++ /dev/null
@@ -1,441 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_scaling_group
-short_description: Manipulate Rackspace Cloud Autoscale Groups
-description:
- - Manipulate Rackspace Cloud Autoscale Groups
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- config_drive:
- description:
- - Attach read-only configuration drive to server as label config-2
- type: bool
- default: false
- cooldown:
- type: int
- description:
- - The period of time, in seconds, that must pass before any scaling can
- occur after the previous scaling. Must be an integer between 0 and
- 86400 (24 hrs).
- default: 300
- disk_config:
- type: str
- description:
- - Disk partitioning strategy
- - If not specified, it will fallback to V(auto).
- choices:
- - auto
- - manual
- files:
- type: dict
- default: {}
- description:
- - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
- flavor:
- type: str
- description:
- - flavor to use for the instance
- required: true
- image:
- type: str
- description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name).
- required: true
- key_name:
- type: str
- description:
- - key pair to use on the instance
- loadbalancers:
- type: list
- elements: dict
- description:
- - List of load balancer C(id) and C(port) hashes
- max_entities:
- type: int
- description:
- - The maximum number of entities that are allowed in the scaling group.
- Must be an integer between 0 and 1000.
- required: true
- meta:
- type: dict
- default: {}
- description:
- - A hash of metadata to associate with the instance
- min_entities:
- type: int
- description:
- - The minimum number of entities that are allowed in the scaling group.
- Must be an integer between 0 and 1000.
- required: true
- name:
- type: str
- description:
- - Name to give the scaling group
- required: true
- networks:
- type: list
- elements: str
- description:
- - The network to attach to the instances. If specified, you must include
- ALL networks including the public and private interfaces. Can be C(id)
- or C(label).
- default:
- - public
- - private
- server_name:
- type: str
- description:
- - The base name for servers created by Autoscale
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- user_data:
- type: str
- description:
- - Data to be uploaded to the servers config drive. This option implies
- O(config_drive). Can be a file path or a string
- wait:
- description:
- - wait for the scaling group to finish provisioning the minimum amount of
- servers
- type: bool
- default: false
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - community.general.rax_scaling_group:
- credentials: ~/.raxpub
- region: ORD
- cooldown: 300
- flavor: performance1-1
- image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
- min_entities: 5
- max_entities: 10
- name: ASG Test
- server_name: asgtest
- loadbalancers:
- - id: 228385
- port: 80
- register: asg
-'''
-
-import base64
-import json
-import os
-import time
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (
- rax_argument_spec, rax_find_image, rax_find_network,
- rax_required_together, rax_to_dict, setup_rax_module,
- rax_scaling_group_personality_file,
-)
-from ansible.module_utils.six import string_types
-
-
-def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
- image=None, key_name=None, loadbalancers=None, meta=None,
- min_entities=0, max_entities=0, name=None, networks=None,
- server_name=None, state='present', user_data=None,
- config_drive=False, wait=True, wait_timeout=300):
- files = {} if files is None else files
- loadbalancers = [] if loadbalancers is None else loadbalancers
- meta = {} if meta is None else meta
- networks = [] if networks is None else networks
-
- changed = False
-
- au = pyrax.autoscale
- if not au:
- module.fail_json(msg='Failed to instantiate clients. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if user_data:
- config_drive = True
-
- if user_data and os.path.isfile(user_data):
- try:
- f = open(user_data)
- user_data = f.read()
- f.close()
- except Exception as e:
-            module.fail_json(msg='Failed to load %s: %s' % (user_data, e))
-
- if state == 'present':
- # Normalize and ensure all metadata values are strings
- if meta:
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- if image:
- image = rax_find_image(module, pyrax, image)
-
- nics = []
- if networks:
- for network in networks:
- nics.extend(rax_find_network(module, pyrax, network))
-
- for nic in nics:
- # pyrax is currently returning net-id, but we need uuid
- # this check makes this forward compatible for a time when
- # pyrax uses uuid instead
- if nic.get('net-id'):
- nic.update(uuid=nic['net-id'])
- del nic['net-id']
-
- # Handle the file contents
- personality = rax_scaling_group_personality_file(module, files)
-
- lbs = []
- if loadbalancers:
- for lb in loadbalancers:
- try:
- lb_id = int(lb.get('id'))
- except (ValueError, TypeError):
- module.fail_json(msg='Load balancer ID is not an integer: '
- '%s' % lb.get('id'))
- try:
- port = int(lb.get('port'))
- except (ValueError, TypeError):
- module.fail_json(msg='Load balancer port is not an '
- 'integer: %s' % lb.get('port'))
- if not lb_id or not port:
- continue
- lbs.append((lb_id, port))
-
- try:
- sg = au.find(name=name)
- except pyrax.exceptions.NoUniqueMatch as e:
- module.fail_json(msg='%s' % e.message)
- except pyrax.exceptions.NotFound:
- try:
- sg = au.create(name, cooldown=cooldown,
- min_entities=min_entities,
- max_entities=max_entities,
- launch_config_type='launch_server',
- server_name=server_name, image=image,
- flavor=flavor, disk_config=disk_config,
- metadata=meta, personality=personality,
- networks=nics, load_balancers=lbs,
- key_name=key_name, config_drive=config_drive,
- user_data=user_data)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- if not changed:
- # Scaling Group Updates
- group_args = {}
- if cooldown != sg.cooldown:
- group_args['cooldown'] = cooldown
-
- if min_entities != sg.min_entities:
- group_args['min_entities'] = min_entities
-
- if max_entities != sg.max_entities:
- group_args['max_entities'] = max_entities
-
- if group_args:
- changed = True
- sg.update(**group_args)
-
- # Launch Configuration Updates
- lc = sg.get_launch_config()
- lc_args = {}
- if server_name != lc.get('name'):
- lc_args['server_name'] = server_name
-
- if image != lc.get('image'):
- lc_args['image'] = image
-
- if flavor != lc.get('flavor'):
- lc_args['flavor'] = flavor
-
- disk_config = disk_config or 'AUTO'
- if ((disk_config or lc.get('disk_config')) and
- disk_config != lc.get('disk_config', 'AUTO')):
- lc_args['disk_config'] = disk_config
-
- if (meta or lc.get('meta')) and meta != lc.get('metadata'):
- lc_args['metadata'] = meta
-
- test_personality = []
- for p in personality:
- test_personality.append({
- 'path': p['path'],
- 'contents': base64.b64encode(p['contents'])
- })
- if ((test_personality or lc.get('personality')) and
- test_personality != lc.get('personality')):
- lc_args['personality'] = personality
-
- if nics != lc.get('networks'):
- lc_args['networks'] = nics
-
- if lbs != lc.get('load_balancers'):
- # Work around for https://github.com/rackspace/pyrax/pull/393
- lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
-
- if key_name != lc.get('key_name'):
- lc_args['key_name'] = key_name
-
- if config_drive != lc.get('config_drive', False):
- lc_args['config_drive'] = config_drive
-
- if (user_data and
- base64.b64encode(user_data) != lc.get('user_data')):
- lc_args['user_data'] = user_data
-
- if lc_args:
- # Work around for https://github.com/rackspace/pyrax/pull/389
- if 'flavor' not in lc_args:
- lc_args['flavor'] = lc.get('flavor')
- changed = True
- sg.update_launch_config(**lc_args)
-
- sg.get()
-
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- state = sg.get_state()
- if state["pending_capacity"] == 0:
- break
-
- time.sleep(5)
-
- module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
-
- else:
- try:
- sg = au.find(name=name)
- sg.delete()
- changed = True
- except pyrax.exceptions.NotFound as e:
- sg = {}
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- config_drive=dict(default=False, type='bool'),
- cooldown=dict(type='int', default=300),
- disk_config=dict(choices=['auto', 'manual']),
- files=dict(type='dict', default={}),
- flavor=dict(required=True),
- image=dict(required=True),
- key_name=dict(),
- loadbalancers=dict(type='list', elements='dict'),
- meta=dict(type='dict', default={}),
- min_entities=dict(type='int', required=True),
- max_entities=dict(type='int', required=True),
- name=dict(required=True),
- networks=dict(type='list', elements='str', default=['public', 'private']),
- server_name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- user_data=dict(no_log=True),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=300, type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- config_drive = module.params.get('config_drive')
- cooldown = module.params.get('cooldown')
- disk_config = module.params.get('disk_config')
- if disk_config:
- disk_config = disk_config.upper()
- files = module.params.get('files')
- flavor = module.params.get('flavor')
- image = module.params.get('image')
- key_name = module.params.get('key_name')
- loadbalancers = module.params.get('loadbalancers')
- meta = module.params.get('meta')
- min_entities = module.params.get('min_entities')
- max_entities = module.params.get('max_entities')
- name = module.params.get('name')
- networks = module.params.get('networks')
- server_name = module.params.get('server_name')
- state = module.params.get('state')
- user_data = module.params.get('user_data')
-
- if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
-        module.fail_json(msg='min_entities and max_entities must be '
-                             'integers between 0 and 1000')
-
- if not 0 <= cooldown <= 86400:
- module.fail_json(msg='cooldown must be an integer between 0 and 86400')
-
- setup_rax_module(module, pyrax)
-
- rax_asg(module, cooldown=cooldown, disk_config=disk_config,
- files=files, flavor=flavor, image=image, meta=meta,
- key_name=key_name, loadbalancers=loadbalancers,
- min_entities=min_entities, max_entities=max_entities,
- name=name, networks=networks, server_name=server_name,
- state=state, config_drive=config_drive, user_data=user_data)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
deleted file mode 100644
index 2869a6910..000000000
--- a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_scaling_policy
-short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
-description:
- - Manipulate Rackspace Cloud Autoscale Scaling Policy
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- at:
- type: str
- description:
- - The UTC time when this policy will be executed. The time must be
- formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
- V(2013-05-19T08:07:08Z)
- change:
- type: int
- description:
- - The change, either as a number of servers or as a percentage, to make
- in the scaling group. If this is a percentage, you must set
- O(is_percent) to V(true) also.
- cron:
- type: str
- description:
-    - The time when the policy will be executed, as a cron entry. For
-      example, V(1 0 * * *) runs the policy every day at 00:01.
- cooldown:
- type: int
- description:
- - The period of time, in seconds, that must pass before any scaling can
- occur after the previous scaling. Must be an integer between 0 and
- 86400 (24 hrs).
- default: 300
- desired_capacity:
- type: int
- description:
-    - The desired server capacity of the scaling group; that is, how
- many servers should be in the scaling group.
- is_percent:
- description:
- - Whether the value in O(change) is a percent value
- default: false
- type: bool
- name:
- type: str
- description:
- - Name to give the policy
- required: true
- policy_type:
- type: str
- description:
- - The type of policy that will be executed for the current release.
- choices:
- - webhook
- - schedule
- required: true
- scaling_group:
- type: str
- description:
- - Name of the scaling group that this policy will be added to
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
- - community.general.rackspace
- - community.general.rackspace.openstack
- - community.general.attributes
-
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- at: '2013-05-19T08:07:08Z'
- change: 25
- cooldown: 300
- is_percent: true
- name: ASG Test Policy - at
- policy_type: schedule
- scaling_group: ASG Test
- register: asps_at
-
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- cron: '1 0 * * *'
- change: 25
- cooldown: 300
- is_percent: true
- name: ASG Test Policy - cron
- policy_type: schedule
- scaling_group: ASG Test
- register: asp_cron
-
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- cooldown: 300
- desired_capacity: 5
- name: ASG Test Policy - webhook
- policy_type: webhook
- scaling_group: ASG Test
- register: asp_webhook
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
- setup_rax_module)
-
-
-def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
- desired_capacity=0, is_percent=False, name=None,
- policy_type=None, scaling_group=None, state='present'):
- changed = False
-
- au = pyrax.autoscale
- if not au:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- try:
- UUID(scaling_group)
- except ValueError:
- try:
- sg = au.find(name=scaling_group)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- try:
- sg = au.get(scaling_group)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- if state == 'present':
-        policies = list(filter(lambda p: name == p.name, sg.list_policies()))
- if len(policies) > 1:
- module.fail_json(msg='No unique policy match found by name')
- if at:
- args = dict(at=at)
- elif cron:
- args = dict(cron=cron)
- else:
- args = None
-
- if not policies:
- try:
- policy = sg.add_policy(name, policy_type=policy_type,
- cooldown=cooldown, change=change,
- is_percent=is_percent,
- desired_capacity=desired_capacity,
- args=args)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- else:
- policy = policies[0]
- kwargs = {}
- if policy_type != policy.type:
- kwargs['policy_type'] = policy_type
-
- if cooldown != policy.cooldown:
- kwargs['cooldown'] = cooldown
-
- if hasattr(policy, 'change') and change != policy.change:
- kwargs['change'] = change
-
- if hasattr(policy, 'changePercent') and is_percent is False:
- kwargs['change'] = change
- kwargs['is_percent'] = False
- elif hasattr(policy, 'change') and is_percent is True:
- kwargs['change'] = change
- kwargs['is_percent'] = True
-
- if hasattr(policy, 'desiredCapacity') and change:
- kwargs['change'] = change
- elif ((hasattr(policy, 'change') or
- hasattr(policy, 'changePercent')) and desired_capacity):
- kwargs['desired_capacity'] = desired_capacity
-
- if hasattr(policy, 'args') and args != policy.args:
- kwargs['args'] = args
-
- if kwargs:
- policy.update(**kwargs)
- changed = True
-
- policy.get()
-
- module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
- else:
- try:
-            policies = list(filter(lambda p: name == p.name, sg.list_policies()))
-            if len(policies) > 1:
-                module.fail_json(msg='No unique policy match found by name')
-            elif not policies:
-                policy = {}
-            else:
-                policy = policies[0]
-                policy.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- at=dict(),
- change=dict(type='int'),
- cron=dict(),
- cooldown=dict(type='int', default=300),
- desired_capacity=dict(type='int'),
- is_percent=dict(type='bool', default=False),
- name=dict(required=True),
- policy_type=dict(required=True, choices=['webhook', 'schedule']),
- scaling_group=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[
- ['cron', 'at'],
- ['change', 'desired_capacity'],
- ]
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- at = module.params.get('at')
- change = module.params.get('change')
- cron = module.params.get('cron')
- cooldown = module.params.get('cooldown')
- desired_capacity = module.params.get('desired_capacity')
- is_percent = module.params.get('is_percent')
- name = module.params.get('name')
- policy_type = module.params.get('policy_type')
- scaling_group = module.params.get('scaling_group')
- state = module.params.get('state')
-
- if (at or cron) and policy_type == 'webhook':
- module.fail_json(msg='policy_type=schedule is required for a time '
- 'based policy')
-
- setup_rax_module(module, pyrax)
-
- rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
- desired_capacity=desired_capacity, is_percent=is_percent,
- name=name, policy_type=policy_type, scaling_group=scaling_group,
- state=state)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/redfish_command.py b/ansible_collections/community/general/plugins/modules/redfish_command.py
index 06224235a..d351e7c1d 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_command.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_command.py
@@ -109,9 +109,10 @@ options:
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- - The default value for this param is C(10) but that is being deprecated
- and it will be replaced with C(60) in community.general 9.0.0.
+ - The default value for this parameter changed from V(10) to V(60)
+ in community.general 9.0.0.
type: int
+ default: 60
boot_override_mode:
description:
- Boot mode when using an override.
@@ -805,7 +806,7 @@ def main():
update_username=dict(type='str', aliases=["account_updatename"]),
account_properties=dict(type='dict', default={}),
bootdevice=dict(),
- timeout=dict(type='int'),
+ timeout=dict(type='int', default=60),
uefi_target=dict(),
boot_next=dict(),
boot_override_mode=dict(choices=['Legacy', 'UEFI']),
@@ -854,16 +855,6 @@ def main():
supports_check_mode=False
)
- if module.params['timeout'] is None:
- timeout = 10
- module.deprecate(
- 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
- 10, 60
- ),
- version='9.0.0',
- collection_name='community.general'
- )
-
category = module.params['category']
command_list = module.params['command']
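With the new default shown above, a task that simply omits O(timeout) now waits up to 60 seconds per request; a minimal sketch, where the category/command pair and the connection variables are illustrative placeholders only:

- name: Gracefully restart a system using the 60 second default timeout
  community.general.redfish_command:
    category: Systems
    command: PowerGracefulRestart
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"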
diff --git a/ansible_collections/community/general/plugins/modules/redfish_config.py b/ansible_collections/community/general/plugins/modules/redfish_config.py
index 1fea9e7cd..129b33b2e 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_config.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_config.py
@@ -64,9 +64,10 @@ options:
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- - The default value for this param is C(10) but that is being deprecated
- and it will be replaced with C(60) in community.general 9.0.0.
+ - The default value for this parameter changed from V(10) to V(60)
+ in community.general 9.0.0.
type: int
+ default: 60
boot_order:
required: false
description:
@@ -384,7 +385,7 @@ def main():
password=dict(no_log=True),
auth_token=dict(no_log=True),
bios_attributes=dict(type='dict', default={}),
- timeout=dict(type='int'),
+ timeout=dict(type='int', default=60),
boot_order=dict(type='list', elements='str', default=[]),
network_protocols=dict(
type='dict',
@@ -418,16 +419,6 @@ def main():
supports_check_mode=False
)
- if module.params['timeout'] is None:
- timeout = 10
- module.deprecate(
- 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
- 10, 60
- ),
- version='9.0.0',
- collection_name='community.general'
- )
-
category = module.params['category']
command_list = module.params['command']
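Playbooks that depended on the previous behaviour can pin the old value explicitly; a hedged sketch, with an assumed BIOS attribute name and placeholder connection variables:

- name: Set a BIOS attribute while keeping the former 10 second timeout
  community.general.redfish_config:
    category: Systems
    command: SetBiosAttributes
    bios_attributes:
      BootMode: Uefi
    timeout: 10
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"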
diff --git a/ansible_collections/community/general/plugins/modules/redfish_info.py b/ansible_collections/community/general/plugins/modules/redfish_info.py
index 0b39bb6fa..3b594b7a2 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_info.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_info.py
@@ -63,9 +63,10 @@ options:
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- - The default value for this param is C(10) but that is being deprecated
- and it will be replaced with C(60) in community.general 9.0.0.
+ - The default value for this parameter changed from V(10) to V(60)
+ in community.general 9.0.0.
type: int
+ default: 60
update_handle:
required: false
description:
@@ -407,7 +408,7 @@ def main():
username=dict(),
password=dict(no_log=True),
auth_token=dict(no_log=True),
- timeout=dict(type='int'),
+ timeout=dict(type='int', default=60),
update_handle=dict(),
manager=dict(),
),
@@ -423,16 +424,6 @@ def main():
supports_check_mode=True,
)
- if module.params['timeout'] is None:
- timeout = 10
- module.deprecate(
- 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
- 10, 60
- ),
- version='9.0.0',
- collection_name='community.general'
- )
-
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password'],
diff --git a/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
index d4b47d5d5..4a7aac483 100644
--- a/ansible_collections/community/general/plugins/modules/redhat_subscription.py
+++ b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
@@ -123,10 +123,9 @@ options:
description:
- Upon successful registration, auto-consume available subscriptions
- |
- Please note that the alias O(autosubscribe) will be removed in
+ Please note that the alias O(ignore:autosubscribe) was removed in
community.general 9.0.0.
type: bool
- aliases: [autosubscribe]
activationkey:
description:
- supply an activation key for use with registration
@@ -1106,17 +1105,7 @@ def main():
'server_port': {},
'rhsm_baseurl': {},
'rhsm_repo_ca_cert': {},
- 'auto_attach': {
- 'type': 'bool',
- 'aliases': ['autosubscribe'],
- 'deprecated_aliases': [
- {
- 'name': 'autosubscribe',
- 'version': '9.0.0',
- 'collection_name': 'community.general',
- },
- ],
- },
+ 'auto_attach': {'type': 'bool'},
'activationkey': {'no_log': True},
'org_id': {},
'environment': {},
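With the autosubscribe alias gone, the canonical option name is the only remaining spelling; a minimal sketch with placeholder credentials:

- name: Register the system and auto-attach available subscriptions
  community.general.redhat_subscription:
    state: present
    username: "{{ rhsm_username }}"
    password: "{{ rhsm_password }}"
    auto_attach: true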
diff --git a/ansible_collections/community/general/plugins/modules/slackpkg.py b/ansible_collections/community/general/plugins/modules/slackpkg.py
index e3d7a1542..9347db159 100644
--- a/ansible_collections/community/general/plugins/modules/slackpkg.py
+++ b/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -106,9 +106,8 @@ def remove_packages(module, slackpkg_path, packages):
continue
if not module.check_mode:
- rc, out, err = module.run_command("%s -default_answer=y -batch=on \
- remove %s" % (slackpkg_path,
- package))
+ rc, out, err = module.run_command(
+ [slackpkg_path, "-default_answer=y", "-batch=on", "remove", package])
if not module.check_mode and query_package(module, slackpkg_path,
package):
@@ -132,9 +131,8 @@ def install_packages(module, slackpkg_path, packages):
continue
if not module.check_mode:
- rc, out, err = module.run_command("%s -default_answer=y -batch=on \
- install %s" % (slackpkg_path,
- package))
+ rc, out, err = module.run_command(
+ [slackpkg_path, "-default_answer=y", "-batch=on", "install", package])
if not module.check_mode and not query_package(module, slackpkg_path,
package):
@@ -155,9 +153,8 @@ def upgrade_packages(module, slackpkg_path, packages):
for package in packages:
if not module.check_mode:
- rc, out, err = module.run_command("%s -default_answer=y -batch=on \
- upgrade %s" % (slackpkg_path,
- package))
+ rc, out, err = module.run_command(
+ [slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package])
if not module.check_mode and not query_package(module, slackpkg_path,
package):
@@ -174,7 +171,8 @@ def upgrade_packages(module, slackpkg_path, packages):
def update_cache(module, slackpkg_path):
- rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ rc, out, err = module.run_command(
+ [slackpkg_path, "-batch=on", "update"])
if rc != 0:
module.fail_json(msg="Could not update package cache")
diff --git a/ansible_collections/community/general/plugins/modules/snap.py b/ansible_collections/community/general/plugins/modules/snap.py
index fd1676480..16c3aec48 100644
--- a/ansible_collections/community/general/plugins/modules/snap.py
+++ b/ansible_collections/community/general/plugins/modules/snap.py
@@ -194,6 +194,7 @@ class Snap(StateModuleHelper):
},
supports_check_mode=True,
)
+ use_old_vardict = False
@staticmethod
def _first_non_zero(a):
@@ -405,8 +406,8 @@ class Snap(StateModuleHelper):
def state_present(self):
- self.vars.meta('classic').set(output=True)
- self.vars.meta('channel').set(output=True)
+ self.vars.set_meta('classic', output=True)
+ self.vars.set_meta('channel', output=True)
actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH]
if actionable_refresh:
diff --git a/ansible_collections/community/general/plugins/modules/snap_alias.py b/ansible_collections/community/general/plugins/modules/snap_alias.py
index 54448c6f3..ba54a9e15 100644
--- a/ansible_collections/community/general/plugins/modules/snap_alias.py
+++ b/ansible_collections/community/general/plugins/modules/snap_alias.py
@@ -105,6 +105,7 @@ class SnapAlias(StateModuleHelper):
],
supports_check_mode=True,
)
+ use_old_vardict = False
def _aliases(self):
n = self.vars.name
diff --git a/ansible_collections/community/general/plugins/modules/stackdriver.py b/ansible_collections/community/general/plugins/modules/stackdriver.py
deleted file mode 100644
index 35b2b0dc1..000000000
--- a/ansible_collections/community/general/plugins/modules/stackdriver.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: stackdriver
-short_description: Send code deploy and annotation events to stackdriver
-description:
- - Send code deploy and annotation events to Stackdriver
-author: "Ben Whaley (@bwhaley)"
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- key:
- type: str
- description:
- - API key.
- required: true
- event:
- type: str
- description:
- - The type of event to send, either annotation or deploy
- choices: ['annotation', 'deploy']
- required: true
- revision_id:
- type: str
- description:
- - The revision of the code that was deployed. Required for deploy events
- deployed_by:
- type: str
- description:
- - The person or robot responsible for deploying the code
- default: "Ansible"
- deployed_to:
- type: str
- description:
-    - "The environment the code was deployed to (for example: development, staging, production)."
- repository:
- type: str
- description:
- - The repository (or project) deployed
- msg:
- type: str
- description:
- - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
- annotated_by:
- type: str
- description:
- - The person or robot who the annotation should be attributed to.
- default: "Ansible"
- level:
- type: str
- description:
-    - One of INFO, WARN, or ERROR; defaults to INFO if not supplied. May affect display.
- choices: ['INFO', 'WARN', 'ERROR']
- default: 'INFO'
- instance_id:
- type: str
- description:
-    - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
- event_epoch:
- type: str
- description:
- - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
-'''
-
-EXAMPLES = '''
-- name: Send a code deploy event to stackdriver
- community.general.stackdriver:
- key: AAAAAA
- event: deploy
- deployed_to: production
- deployed_by: leeroyjenkins
- repository: MyWebApp
- revision_id: abcd123
-
-- name: Send an annotation event to stackdriver
- community.general.stackdriver:
- key: AAAAAA
- event: annotation
- msg: Greetings from Ansible
- annotated_by: leeroyjenkins
- level: WARN
- instance_id: i-abcd1234
-'''
-
-# ===========================================
-# Stackdriver module specific support methods.
-#
-
-import json
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.urls import fetch_url
-
-
-def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
- """Send a deploy event to Stackdriver"""
- deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
-
- params = {}
- params['revision_id'] = revision_id
- params['deployed_by'] = deployed_by
- if deployed_to:
- params['deployed_to'] = deployed_to
- if repository:
- params['repository'] = repository
-
- return do_send_request(module, deploy_api, params, key)
-
-
-def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
- """Send an annotation event to Stackdriver"""
- annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
-
- params = {}
- params['message'] = msg
- if annotated_by:
- params['annotated_by'] = annotated_by
- if level:
- params['level'] = level
- if instance_id:
- params['instance_id'] = instance_id
- if event_epoch:
- params['event_epoch'] = event_epoch
-
- return do_send_request(module, annotation_api, params, key)
-
-
-def do_send_request(module, url, params, key):
- data = json.dumps(params)
- headers = {
- 'Content-Type': 'application/json',
- 'x-stackdriver-apikey': key
- }
- response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
- if info['status'] != 200:
- module.fail_json(msg="Unable to send msg: %s" % info['msg'])
-
-
-# ===========================================
-# Module execution.
-#
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict( # @TODO add types
- key=dict(required=True, no_log=True),
- event=dict(required=True, choices=['deploy', 'annotation']),
- msg=dict(),
- revision_id=dict(),
- annotated_by=dict(default='Ansible'),
- level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
- instance_id=dict(),
- event_epoch=dict(), # @TODO int?
- deployed_by=dict(default='Ansible'),
- deployed_to=dict(),
- repository=dict(),
- ),
- supports_check_mode=True
- )
-
- key = module.params["key"]
- event = module.params["event"]
-
- # Annotation params
- msg = module.params["msg"]
- annotated_by = module.params["annotated_by"]
- level = module.params["level"]
- instance_id = module.params["instance_id"]
- event_epoch = module.params["event_epoch"]
-
- # Deploy params
- revision_id = module.params["revision_id"]
- deployed_by = module.params["deployed_by"]
- deployed_to = module.params["deployed_to"]
- repository = module.params["repository"]
-
- ##################################################################
- # deploy requires revision_id
- # annotation requires msg
- # We verify these manually
- ##################################################################
-
- if event == 'deploy':
- if not revision_id:
- module.fail_json(msg="revision_id required for deploy events")
- try:
- send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
- except Exception as e:
-            module.fail_json(msg="unable to send deploy event: %s" % to_native(e),
- exception=traceback.format_exc())
-
- if event == 'annotation':
- if not msg:
- module.fail_json(msg="msg required for annotation events")
- try:
- send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
- except Exception as e:
-            module.fail_json(msg="unable to send annotation event: %s" % to_native(e),
- exception=traceback.format_exc())
-
- changed = True
- module.exit_json(changed=changed, deployed_by=deployed_by)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/svr4pkg.py b/ansible_collections/community/general/plugins/modules/svr4pkg.py
index db9902c77..56ded66e6 100644
--- a/ansible_collections/community/general/plugins/modules/svr4pkg.py
+++ b/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -120,7 +120,7 @@ def package_installed(module, name, category):
if category:
cmd.append('-c')
cmd.append(name)
- rc, out, err = module.run_command(' '.join(cmd))
+ rc, out, err = module.run_command(cmd)
if rc == 0:
return True
else:
diff --git a/ansible_collections/community/general/plugins/modules/swdepot.py b/ansible_collections/community/general/plugins/modules/swdepot.py
index 28a8ce314..9ba1b02b3 100644
--- a/ansible_collections/community/general/plugins/modules/swdepot.py
+++ b/ansible_collections/community/general/plugins/modules/swdepot.py
@@ -68,7 +68,6 @@ EXAMPLES = '''
import re
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import shlex_quote
def compare_package(version1, version2):
@@ -94,13 +93,13 @@ def compare_package(version1, version2):
def query_package(module, name, depot=None):
""" Returns whether a package is installed or not and version. """
- cmd_list = '/usr/sbin/swlist -a revision -l product'
+ cmd_list = ['/usr/sbin/swlist', '-a', 'revision', '-l', 'product']
if depot:
- rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
- use_unsafe_shell=True)
- else:
- rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ cmd_list.extend(['-s', depot])
+ cmd_list.append(name)
+ rc, stdout, stderr = module.run_command(cmd_list)
if rc == 0:
+ stdout = ''.join(line for line in stdout.splitlines(True) if name in line)
version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
else:
version = None
@@ -112,7 +111,7 @@ def remove_package(module, name):
""" Uninstall package if installed. """
cmd_remove = '/usr/sbin/swremove'
- rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+ rc, stdout, stderr = module.run_command([cmd_remove, name])
if rc == 0:
return rc, stdout
@@ -123,8 +122,8 @@ def remove_package(module, name):
def install_package(module, depot, name):
""" Install package if not already installed """
- cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
- rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ cmd_install = ['/usr/sbin/swinstall', '-x', 'mount_all_filesystems=false']
+ rc, stdout, stderr = module.run_command(cmd_install + ["-s", depot, name])
if rc == 0:
return rc, stdout
else:
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_app.py b/ansible_collections/community/general/plugins/modules/webfaction_app.py
deleted file mode 100644
index 81bfc8b68..000000000
--- a/ansible_collections/community/general/plugins/modules/webfaction_app.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
-# * Andy Baker
-# * Federico Tarantini
-#
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Create a Webfaction application using Ansible and the Webfaction API
-#
-# Valid application types can be found by looking here:
-# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: webfaction_app
-short_description: Add or remove applications on a Webfaction host
-description:
- - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
- your host, you may want to add C(serial=1) to the plays.
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
-
-extends_documentation_fragment:
- - community.general.attributes
-
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-
-options:
- name:
- description:
- - The name of the application
- required: true
- type: str
-
- state:
- description:
- - Whether the application should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- type:
- description:
- - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
- required: true
- type: str
-
- autostart:
- description:
- - Whether the app should restart with an C(autostart.cgi) script
- type: bool
- default: false
-
- extra_info:
- description:
- - Any extra parameters required by the app
- default: ''
- type: str
-
- port_open:
- description:
- - If the port should be opened
- type: bool
- default: false
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-
- machine:
- description:
- - The machine name to use (optional for accounts with only one machine)
- type: str
-
-'''
-
-EXAMPLES = '''
- - name: Create a test app
- community.general.webfaction_app:
- name: "my_wsgi_app1"
- state: present
- type: mod_wsgi35-python27
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
- machine: "{{webfaction_machine}}"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- type=dict(required=True),
- autostart=dict(type='bool', default=False),
- extra_info=dict(default=""),
- port_open=dict(type='bool', default=False),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- machine=dict(),
- ),
- supports_check_mode=True
- )
- app_name = module.params['name']
- app_type = module.params['type']
- app_state = module.params['state']
-
- if module.params['machine']:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password'],
- module.params['machine']
- )
- else:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- app_list = webfaction.list_apps(session_id)
- app_map = dict([(i['name'], i) for i in app_list])
- existing_app = app_map.get(app_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if app_state == 'present':
-
- # Does an app with this name already exist?
- if existing_app:
- if existing_app['type'] != app_type:
- module.fail_json(msg="App already exists with different type. Please fix by hand.")
-
- # If it exists with the right type, we don't change it
- # Should check other parameters.
- module.exit_json(
- changed=False,
- result=existing_app,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, create the app
- result.update(
- webfaction.create_app(
- session_id, app_name, app_type,
- module.boolean(module.params['autostart']),
- module.params['extra_info'],
- module.boolean(module.params['port_open'])
- )
- )
-
- elif app_state == 'absent':
-
- # If the app's already not there, nothing changed.
- if not existing_app:
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, delete the app
- result.update(
- webfaction.delete_app(session_id, app_name)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(app_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
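
The webfaction_app removal above is the first of five webfaction_* deletions in this patch (app, db, domain, mailbox, site); all of them share the XML-RPC session handling condensed below, so the remaining deletions follow the same shape. This is a reference sketch only: ServerProxy and login() come straight from the removed code, the open_session helper name is invented here, and the API host itself no longer resolves, which is the stated reason for the removal.

from ansible.module_utils.six.moves import xmlrpc_client

webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')

def open_session(login_name, login_password, machine=None):
    # login() returns a (session_id, account) pair; the optional machine argument
    # is only needed for accounts that have more than one machine.
    if machine:
        return webfaction.login(login_name, login_password, machine)
    return webfaction.login(login_name, login_password)
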
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_db.py b/ansible_collections/community/general/plugins/modules/webfaction_db.py
deleted file mode 100644
index 5428de5b6..000000000
--- a/ansible_collections/community/general/plugins/modules/webfaction_db.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
-# * Andy Baker
-# * Federico Tarantini
-#
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Create a webfaction database using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: webfaction_db
-short_description: Add or remove a database on Webfaction
-description:
- - Add or remove a database on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
- your host, you may want to add C(serial=1) to the plays.
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
-
- name:
- description:
- - The name of the database
- required: true
- type: str
-
- state:
- description:
- - Whether the database should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- type:
- description:
- - The type of database to create.
- required: true
- choices: ['mysql', 'postgresql']
- type: str
-
- password:
- description:
- - The password for the new database user.
- type: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-
- machine:
- description:
- - The machine name to use (optional for accounts with only one machine)
- type: str
-'''
-
-EXAMPLES = '''
- # This will also create a default DB user with the same
- # name as the database, and the specified password.
-
- - name: Create a database
- community.general.webfaction_db:
- name: "{{webfaction_user}}_db1"
- password: mytestsql
- type: mysql
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
- machine: "{{webfaction_machine}}"
-
- # Note that, for symmetry's sake, deleting a database using
- # 'state: absent' will also delete the matching user.
-
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- type=dict(required=True, choices=['mysql', 'postgresql']),
- password=dict(no_log=True),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- machine=dict(),
- ),
- supports_check_mode=True
- )
- db_name = module.params['name']
- db_state = module.params['state']
- db_type = module.params['type']
- db_passwd = module.params['password']
-
- if module.params['machine']:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password'],
- module.params['machine']
- )
- else:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- db_list = webfaction.list_dbs(session_id)
- db_map = dict([(i['name'], i) for i in db_list])
- existing_db = db_map.get(db_name)
-
- user_list = webfaction.list_db_users(session_id)
- user_map = dict([(i['username'], i) for i in user_list])
- existing_user = user_map.get(db_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if db_state == 'present':
-
- # Does a database with this name already exist?
- if existing_db:
- # Yes, but of a different type - fail
- if existing_db['db_type'] != db_type:
- module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
-
- # If it exists with the right type, we don't change anything.
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, create the db
- # and default user.
- result.update(
- webfaction.create_db(
- session_id, db_name, db_type, db_passwd
- )
- )
-
- elif db_state == 'absent':
-
- # If this isn't a dry run...
- if not module.check_mode:
-
- if not (existing_db or existing_user):
- module.exit_json(changed=False,)
-
- if existing_db:
- # Delete the db if it exists
- result.update(
- webfaction.delete_db(session_id, db_name, db_type)
- )
-
- if existing_user:
- # Delete the default db user if it exists
- result.update(
- webfaction.delete_db_user(session_id, db_name, db_type)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(db_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
deleted file mode 100644
index 4c87a539a..000000000
--- a/ansible_collections/community/general/plugins/modules/webfaction_domain.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2015, Quentin Stafford-Fraser
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Create Webfaction domains and subdomains using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: webfaction_domain
-short_description: Add or remove domains and subdomains on Webfaction
-description:
- - Add or remove domains or subdomains on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - If you are I(deleting) domains by using O(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
- If you do not specify subdomains, the domain will be deleted.
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
- your host, you may want to add C(serial=1) to the plays.
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
-
-extends_documentation_fragment:
- - community.general.attributes
-
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-
-options:
-
- name:
- description:
- - The name of the domain
- required: true
- type: str
-
- state:
- description:
- - Whether the domain should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- subdomains:
- description:
- - Any subdomains to create.
- default: []
- type: list
- elements: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create a test domain
- community.general.webfaction_domain:
- name: mydomain.com
- state: present
- subdomains:
- - www
- - blog
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-
- - name: Delete test domain and any subdomains
- community.general.webfaction_domain:
- name: mydomain.com
- state: absent
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- subdomains=dict(default=[], type='list', elements='str'),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
- domain_name = module.params['name']
- domain_state = module.params['state']
- domain_subdomains = module.params['subdomains']
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- domain_list = webfaction.list_domains(session_id)
- domain_map = dict([(i['domain'], i) for i in domain_list])
- existing_domain = domain_map.get(domain_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if domain_state == 'present':
-
- # Does a domain with this name already exist?
- if existing_domain:
-
- if set(existing_domain['subdomains']) >= set(domain_subdomains):
- # If it exists with the right subdomains, we don't change anything.
- module.exit_json(
- changed=False,
- )
-
- positional_args = [session_id, domain_name] + domain_subdomains
-
- if not module.check_mode:
- # If this isn't a dry run, create the domain
- # print positional_args
- result.update(
- webfaction.create_domain(
- *positional_args
- )
- )
-
- elif domain_state == 'absent':
-
- # If the domain's already not there, nothing changed.
- if not existing_domain:
- module.exit_json(
- changed=False,
- )
-
- positional_args = [session_id, domain_name] + domain_subdomains
-
- if not module.check_mode:
- # If this isn't a dry run, delete the domain
- result.update(
- webfaction.delete_domain(*positional_args)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
deleted file mode 100644
index 119dfd283..000000000
--- a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2015, Quentin Stafford-Fraser and Andy Baker
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Create webfaction mailbox using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: webfaction_mailbox
-short_description: Add or remove mailboxes on Webfaction
-description:
- - Add or remove mailboxes on a Webfaction account. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
- your host, you may want to add C(serial=1) to the plays.
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
-
-extends_documentation_fragment:
- - community.general.attributes
-
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-
-options:
-
- mailbox_name:
- description:
- - The name of the mailbox
- required: true
- type: str
-
- mailbox_password:
- description:
- - The password for the mailbox
- required: true
- type: str
-
- state:
- description:
- - Whether the mailbox should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create a mailbox
- community.general.webfaction_mailbox:
- mailbox_name: "mybox"
- mailbox_password: "myboxpw"
- state: present
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- mailbox_name=dict(required=True),
- mailbox_password=dict(required=True, no_log=True),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
-
- mailbox_name = module.params['mailbox_name']
- site_state = module.params['state']
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
- existing_mailbox = mailbox_name in mailbox_list
-
- result = {}
-
- # Here's where the real stuff happens
-
- if site_state == 'present':
-
- # Does a mailbox with this name already exist?
- if existing_mailbox:
- module.exit_json(changed=False,)
-
- positional_args = [session_id, mailbox_name]
-
- if not module.check_mode:
- # If this isn't a dry run, create the mailbox
- result.update(webfaction.create_mailbox(*positional_args))
-
- elif site_state == 'absent':
-
- # If the mailbox is already not there, nothing changed.
- if not existing_mailbox:
- module.exit_json(changed=False)
-
- if not module.check_mode:
- # If this isn't a dry run, delete the mailbox
- result.update(webfaction.delete_mailbox(session_id, mailbox_name))
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(site_state))
-
- module.exit_json(changed=True, result=result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_site.py b/ansible_collections/community/general/plugins/modules/webfaction_site.py
deleted file mode 100644
index 7795c45fe..000000000
--- a/ansible_collections/community/general/plugins/modules/webfaction_site.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2015, Quentin Stafford-Fraser
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Create Webfaction website using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-
-deprecated:
- removed_in: 9.0.0
- why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
- alternative: no known alternative at this point
-
-module: webfaction_site
-short_description: Add or remove a website on a Webfaction host
-description:
- - Add or remove a website on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you do not need to know the IP
- address. You can use a DNS name.
- - If a site of the same name exists in the account but on a different host, the operation will exit.
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
- your host, you may want to add C(serial=1) to the plays.
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
-
-extends_documentation_fragment:
- - community.general.attributes
-
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-
-options:
-
- name:
- description:
- - The name of the website
- required: true
- type: str
-
- state:
- description:
- - Whether the website should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- host:
- description:
- - The webfaction host on which the site should be created.
- required: true
- type: str
-
- https:
- description:
- - Whether or not to use HTTPS
- type: bool
- default: false
-
- site_apps:
- description:
- - A mapping of URLs to apps
- default: []
- type: list
- elements: list
-
- subdomains:
- description:
- - A list of subdomains associated with this site.
- default: []
- type: list
- elements: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create website
- community.general.webfaction_site:
- name: testsite1
- state: present
- host: myhost.webfaction.com
- subdomains:
- - 'testsite1.my_domain.org'
- site_apps:
- - ['testapp1', '/']
- https: false
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-'''
-
-import socket
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- # You can specify an IP address or hostname.
- host=dict(required=True),
- https=dict(required=False, type='bool', default=False),
- subdomains=dict(type='list', elements='str', default=[]),
- site_apps=dict(type='list', elements='list', default=[]),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
- site_name = module.params['name']
- site_state = module.params['state']
- site_host = module.params['host']
- site_ip = socket.gethostbyname(site_host)
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- site_list = webfaction.list_websites(session_id)
- site_map = dict([(i['name'], i) for i in site_list])
- existing_site = site_map.get(site_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if site_state == 'present':
-
- # Does a site with this name already exist?
- if existing_site:
-
- # If yes, but it's on a different IP address, then fail.
- # If we wanted to allow relocation, we could add a 'relocate=true' option
- # which would get the existing IP address, delete the site there, and create it
- # at the new address. A bit dangerous, perhaps, so for now we'll require manual
- # deletion if it's on another host.
-
- if existing_site['ip'] != site_ip:
- module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
-
- # If it's on this host and the key parameters are the same, nothing needs to be done.
-
- if (existing_site['https'] == module.boolean(module.params['https'])) and \
- (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
- (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
- module.exit_json(
- changed=False
- )
-
- positional_args = [
- session_id, site_name, site_ip,
- module.boolean(module.params['https']),
- module.params['subdomains'],
- ]
- for a in module.params['site_apps']:
- positional_args.append((a[0], a[1]))
-
- if not module.check_mode:
- # If this isn't a dry run, create or modify the site
- result.update(
- webfaction.create_website(
- *positional_args
- ) if not existing_site else webfaction.update_website(
- *positional_args
- )
- )
-
- elif site_state == 'absent':
-
- # If the site's already not there, nothing changed.
- if not existing_site:
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, delete the site
- result.update(
- webfaction.delete_website(session_id, site_name, site_ip)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(site_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/xfconf.py b/ansible_collections/community/general/plugins/modules/xfconf.py
index 8ed44c675..15943ae59 100644
--- a/ansible_collections/community/general/plugins/modules/xfconf.py
+++ b/ansible_collections/community/general/plugins/modules/xfconf.py
@@ -187,6 +187,7 @@ class XFConfProperty(StateModuleHelper):
required_together=[('value', 'value_type')],
supports_check_mode=True,
)
+ use_old_vardict = False
default_state = 'present'
@@ -196,7 +197,7 @@ class XFConfProperty(StateModuleHelper):
self.vars.channel)
self.vars.set('previous_value', self._get())
self.vars.set('type', self.vars.value_type)
- self.vars.meta('value').set(initial_value=self.vars.previous_value)
+ self.vars.set_meta('value', initial_value=self.vars.previous_value)
def process_command_output(self, rc, out, err):
if err.rstrip() == self.does_not:
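
Both xfconf.py hunks belong to one ModuleHelper change: the class opts out of the legacy variable container by setting use_old_vardict = False, and the initial value of a variable is now recorded with set_meta() instead of the old meta('value').set() accessor; the companion xfconf_info hunk below applies the same migration on the consumer side by expanding self.vars.as_dict() instead of **self.vars. A condensed sketch of the resulting shape, with the subclass name and the _get_current() getter invented for illustration; the import path is the one used by modules in this collection:

from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper

class ExampleProperty(StateModuleHelper):
    use_old_vardict = False  # switch to the newer VarDict implementation

    def __init_module__(self):
        previous = self._get_current()  # hypothetical getter for the current value
        self.vars.set('previous_value', previous)
        # Record the starting value as metadata so the helper can detect and report changes.
        self.vars.set_meta('value', initial_value=previous)
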
diff --git a/ansible_collections/community/general/plugins/modules/xfconf_info.py b/ansible_collections/community/general/plugins/modules/xfconf_info.py
index 844ef3c11..3d56a70cb 100644
--- a/ansible_collections/community/general/plugins/modules/xfconf_info.py
+++ b/ansible_collections/community/general/plugins/modules/xfconf_info.py
@@ -139,6 +139,7 @@ class XFConfInfo(ModuleHelper):
),
supports_check_mode=True,
)
+ use_old_vardict = False
def __init_module__(self):
self.runner = xfconf_runner(self.module, check_rc=True)
@@ -176,7 +177,7 @@ class XFConfInfo(ModuleHelper):
proc = self._process_list_properties
with self.runner.context('list_arg channel property', output_process=proc) as ctx:
- result = ctx.run(**self.vars)
+ result = ctx.run(**self.vars.as_dict())
if not self.vars.list_arg and self.vars.is_array:
output = "value_array"