summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/community/google/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/google/plugins
parentInitial commit. (diff)
downloadansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/google/plugins')
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py62
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py39
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py799
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py497
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py605
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py350
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py904
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py511
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py164
17 files changed, 5940 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py b/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py
new file mode 100644
index 00000000..06872543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    # GCP doc fragment shared by the community.google modules/plugins.
    # Fix: the first two notes used lowercase c() markup and a lowercase
    # env-var name; environment variables are uppercase and Ansible doc
    # markup for code/env names is C(...), matching the other notes below.
    DOCUMENTATION = r'''
options:
  project:
    description:
    - The Google Cloud Platform project to use.
    type: str
  auth_kind:
    description:
    - The type of credential used.
    type: str
    required: true
    choices: [ application, machineaccount, serviceaccount ]
  service_account_contents:
    description:
    - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
    type: jsonarg
  service_account_file:
    description:
    - The path of a Service Account JSON file if serviceaccount is selected as type.
    type: path
  service_account_email:
    description:
    - An optional service account email address if machineaccount is selected
      and the user does not wish to use the default email.
    type: str
  scopes:
    description:
    - Array of scopes to be used.
    type: list
    elements: str
  env_type:
    description:
    - Specifies which Ansible environment you're running this module within.
    - This should not be set unless you know what you're doing.
    - This only alters the User Agent string for any API requests.
    type: str
notes:
  - For authentication, you can set service_account_file using the
    C(GCP_SERVICE_ACCOUNT_FILE) env variable.
  - For authentication, you can set service_account_contents using the
    C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
  - For authentication, you can set service_account_email using the
    C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
  - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
    variable.
  - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
  - Environment variables values will only be used if the playbook values are
    not set.
  - The I(service_account_email) and I(service_account_file) options are
    mutually exclusive.
'''
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py b/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py
new file mode 100644
index 00000000..ae58c5c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py
@@ -0,0 +1,156 @@
+# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
# Plugin documentation blocks below are parsed as YAML by ansible-doc.
DOCUMENTATION = '''
lookup: gcp_storage_file
description:
  - This lookup returns the contents from a file residing on Google Cloud Storage
short_description: Return GC Storage content
author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
requirements:
  - python >= 2.6
  - requests >= 2.18.4
  - google-auth >= 1.3.0
options:
  src:
    description:
      - Source location of file (may be local machine or cloud depending on action).
    required: false
  bucket:
    description:
      - The name of the bucket.
    required: false
extends_documentation_fragment:
- community.google._gcp

'''

# Usage example rendered by ansible-doc.
EXAMPLES = '''
- ansible.builtin.debug:
    msg: |
      the value of foo.txt is {{ lookup('community.google.gcp_storage_file',
      bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
      auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
'''

# Return-value documentation: the lookup yields base64-encoded file content.
RETURN = '''
_raw:
  description:
    - base64 encoded file content
  type: list
  elements: str
'''
+
+import base64
+import json
+import mimetypes
+import os
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
+ HAS_GOOGLE_CLOUD_COLLECTION = True
+except ImportError:
+ HAS_GOOGLE_CLOUD_COLLECTION = False
+
+
+display = Display()
+
+
class GcpMockModule(object):
    """Minimal stand-in for AnsibleModule: carries params and error plumbing."""

    def __init__(self, params):
        self.params = params

    def fail_json(self, *args, **kwargs):
        # Lookups have no module exit path; surface failures as AnsibleError.
        message = kwargs['msg']
        raise AnsibleError(message)

    def raise_for_status(self, response):
        # Translate HTTP error responses into a lookup failure.
        request_error = getattr(requests.exceptions, 'RequestException')
        try:
            response.raise_for_status()
        except request_error:
            self.fail_json(msg="GCP returned error: %s" % response.json())
+
+
class GcpFileLookup():
    """Fetch an object from Google Cloud Storage and return it base64-encoded.

    Relies on GcpSession/navigate_hash from the google.cloud collection;
    authentication parameters travel on a GcpMockModule instance.
    """

    def get_file_contents(self, module):
        # Download via the media link ('?alt=media' returns the object bytes).
        auth = GcpSession(module, 'storage')
        data = auth.get(self.media_link(module))
        # Trailing whitespace is stripped before encoding, so the result is
        # not necessarily a byte-exact copy of the stored object.
        return base64.b64encode(data.content.rstrip())

    def fetch_resource(self, module, link, allow_not_found=True):
        # GET the object's metadata; None when absent and allow_not_found.
        auth = GcpSession(module, 'storage')
        return self.return_if_object(module, auth.get(link), allow_not_found)

    def self_link(self, module):
        # Metadata URL for the object named by params['src'].
        return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)

    def media_link(self, module):
        # Content-download URL for the object.
        return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)

    def return_if_object(self, module, response, allow_not_found=False):
        # If not found, return nothing.
        if allow_not_found and response.status_code == 404:
            return None
        # If no content, return nothing.
        if response.status_code == 204:
            return None
        try:
            module.raise_for_status(response)
            result = response.json()
        except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
            raise AnsibleError("Invalid JSON response with error: %s" % inst)
        if navigate_hash(result, ['error', 'errors']):
            raise AnsibleError(navigate_hash(result, ['error', 'errors']))
        return result

    def object_headers(self, module):
        # Upload-style headers derived from a local file at params['src'].
        # NOTE(review): appears unused by this lookup's read-only path.
        return {
            "name": module.params['src'],
            "Content-Type": mimetypes.guess_type(module.params['src'])[0],
            "Content-Length": str(os.path.getsize(module.params['src'])),
        }

    def run(self, terms, variables=None, **kwargs):
        # Assemble GcpSession-compatible params from the lookup kwargs.
        params = {
            'bucket': kwargs.get('bucket', None),
            'src': kwargs.get('src', None),
            # NOTE(review): the documented option names are singular
            # ('project'); 'projects'/'zones' here look like typos --
            # confirm against GcpSession before relying on them.
            'projects': kwargs.get('projects', None),
            'scopes': kwargs.get('scopes', None),
            'zones': kwargs.get('zones', None),
            'auth_kind': kwargs.get('auth_kind', None),
            'service_account_file': kwargs.get('service_account_file', None),
            'service_account_email': kwargs.get('service_account_email', None),
        }

        # Default to full-control storage scope when none was requested.
        if not params['scopes']:
            params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']

        fake_module = GcpMockModule(params)

        # Check if files exist.
        remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
        if not remote_object:
            raise AnsibleError("File does not exist in bucket")

        result = self.get_file_contents(fake_module)
        return [result]
+
+
class LookupModule(LookupBase):
    """Ansible entry point: thin wrapper that delegates to GcpFileLookup."""

    def run(self, terms, variables=None, **kwargs):
        # Fail fast with actionable messages when hard dependencies are absent.
        if not HAS_GOOGLE_CLOUD_COLLECTION:
            raise AnsibleError("community.google.gcp_storage_file needs a supported version of the google.cloud collection installed")
        if not HAS_REQUESTS:
            raise AnsibleError("community.google.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
        return GcpFileLookup().run(terms, variables=variables, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py
new file mode 100644
index 00000000..7698e3c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.google.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.google.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gce"
+USER_AGENT_VERSION = "v1"
+
+
def gce_connect(module, provider=None):
    """Return a GCP connection for Google Compute Engine."""
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')
    # Default to the GCE provider when none is supplied.
    chosen_provider = provider or Provider.GCE
    return gcp_connect(module, chosen_provider, get_driver,
                       USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    # Delegate to the shared module_utils.gcp helper so messages stay uniform.
    message = gcp_error(error)
    return message
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py
new file mode 100644
index 00000000..a034f3b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py
@@ -0,0 +1,799 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+# libcloud
+try:
+ import libcloud
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+# google-auth
+try:
+ import google.auth
+ from google.oauth2 import service_account
+ HAS_GOOGLE_AUTH = True
+except ImportError:
+ HAS_GOOGLE_AUTH = False
+
+# google-python-api
+try:
+ import google_auth_httplib2
+ from httplib2 import Http
+ from googleapiclient.http import set_user_agent
+ from googleapiclient.errors import HttpError
+ from apiclient.discovery import build
+ HAS_GOOGLE_API_LIB = True
+except ImportError:
+ HAS_GOOGLE_API_LIB = False
+
+
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
+
+
+def _get_gcp_ansible_credentials(module):
+ """Helper to fetch creds from AnsibleModule object."""
+ service_account_email = module.params.get('service_account_email', None)
+ # Note: pem_file is discouraged and will be deprecated
+ credentials_file = module.params.get('pem_file', None) or module.params.get(
+ 'credentials_file', None)
+ project_id = module.params.get('project_id', None)
+
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_environ_var(var_name, default_value):
+ """Wrapper around os.environ.get call."""
+ return os.environ.get(
+ var_name, default_value)
+
+
def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
    """Fill in any missing credential values from environment variables.

    Values already supplied (truthy) are returned unchanged; empty/None ones
    fall back to the legacy GCE_* and the standard GOOGLE_* variables.
    """
    email = service_account_email or _get_gcp_environ_var('GCE_EMAIL', None)
    creds = credentials_file or (
        _get_gcp_environ_var('GCE_CREDENTIALS_FILE_PATH', None)
        or _get_gcp_environ_var('GOOGLE_APPLICATION_CREDENTIALS', None)
        or _get_gcp_environ_var('GCE_PEM_FILE_PATH', None))
    project = project_id or (
        _get_gcp_environ_var('GCE_PROJECT', None)
        or _get_gcp_environ_var('GOOGLE_CLOUD_PROJECT', None))
    return (email, creds, project)
+
+
def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
    """
    Obtain GCP credentials by trying various methods.

    There are 3 ways to specify GCP credentials:
    1. Specify via Ansible module parameters (recommended).
    2. Specify via environment variables. Two sets of env vars are available:
       a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
       b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
          using p12 key)
    3. Specify via libcloud secrets.py file (deprecated).

    There are 3 helper functions to assist in the above.

    Regardless of method, the user also has the option of specifying a JSON
    file or a p12 file as the credentials file. JSON is strongly recommended and
    p12 will be removed in the future.

    Additionally, flags may be set to require valid json and check the libcloud
    version.

    AnsibleModule.fail_json is called only if the project_id cannot be found.

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param require_valid_json: If true, require credentials to be valid JSON.  Default is True.
    :type require_valid_json: ``bool``

    :params check_libcloud: If true, check the libcloud version available to see if
                            JSON creds are supported.
    :type check_libcloud: ``bool``

    :return:  {'service_account_email': service_account_email,
               'credentials_file': credentials_file,
               'project_id': project_id}
    :rtype: ``dict``
    """
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_ansible_credentials(module)

    # If any of the values are not given as parameters, check the appropriate
    # environment variables.
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_environment_credentials(service_account_email,
                                                    credentials_file, project_id)

    if credentials_file is None or project_id is None or service_account_email is None:
        if check_libcloud is True:
            if project_id is None:
                # TODO(supertom): this message is legacy and integration tests
                # depend on it.
                module.fail_json(msg='Missing GCE connection parameters in libcloud '
                                     'secrets file.')
        else:
            if project_id is None:
                module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
                                      'credentials file (%s)' % (project_id, credentials_file)))
        # Set these fields to empty strings if they are None
        # consumers of this will make the distinction between an empty string
        # and None.
        # NOTE(review): fail_json normally does not return; if it does (e.g.
        # when stubbed in tests), execution continues with these defaults.
        if credentials_file is None:
            credentials_file = ''
        if service_account_email is None:
            service_account_email = ''

    # ensure the credentials file is found and is in the proper format.
    if credentials_file:
        _validate_credentials_file(module, credentials_file,
                                   require_valid_json=require_valid_json,
                                   check_libcloud=check_libcloud)

    return {'service_account_email': service_account_email,
            'credentials_file': credentials_file,
            'project_id': project_id}
+
+
def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
    """
    Check for valid credentials file.

    Optionally check for JSON format and if libcloud supports JSON.

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param credentials_file: path to file on disk
    :type credentials_file: ``str``.  Complete path to file on disk.

    :param require_valid_json: This argument is ignored as of Ansible 2.7.
    :type require_valid_json: ``bool``

    :params check_libcloud: If true, check the libcloud version available to see if
                            JSON creds are supported.
    :type check_libcloud: ``bool``

    :returns: True
    :rtype: ``bool``
    """
    try:
        # Try to read credentials as JSON
        with open(credentials_file) as credentials:
            json.loads(credentials.read())
            # If the credentials are proper JSON and we do not have the minimum
            # required libcloud version, bail out and return a descriptive
            # error
            if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
                module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
                                     'Upgrade to libcloud>=0.17.0.')
            return True
    except IOError as e:
        # File missing/unreadable.
        module.fail_json(msg='GCP Credentials File %s not found.' %
                         credentials_file, changed=False)
        # Reached only if fail_json returns (e.g. stubbed in tests).
        return False
    except ValueError as e:
        # File exists but is not JSON -- most likely a legacy p12 key.
        module.fail_json(
            msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
            changed=False)
+
+
def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
    """Return a Google libcloud driver connection.

    :param module: initialized Ansible module object (params + failure sink).
    :param provider: libcloud provider constant (e.g. Provider.GCE).
    :param get_driver: libcloud driver factory function.
    :param user_agent_product: product token appended to the User-Agent.
    :param user_agent_version: version token appended to the User-Agent.
    :returns: connected libcloud driver instance.
    """
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')

    # check_libcloud=True: creds may come via libcloud's secrets.py, and JSON
    # keys require libcloud >= 0.17.0 (validated downstream).
    creds = _get_gcp_credentials(module,
                                 require_valid_json=False,
                                 check_libcloud=True)
    try:
        gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
                                   datacenter=module.params.get('zone', None),
                                   project=creds['project_id'])
        # Tag outgoing requests so traffic is attributable to this integration.
        gcp.connection.user_agent_append("%s/%s" % (
            user_agent_product, user_agent_version))
    except (RuntimeError, ValueError) as e:
        # Driver-reported configuration problems: report the message as-is.
        module.fail_json(msg=str(e), changed=False)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    return gcp
+
+
def get_google_cloud_credentials(module, scopes=None):
    """
    Get credentials object for use with Google Cloud client.

    Attempts to obtain credentials by calling _get_gcp_credentials. If those are
    not present will attempt to connect via Application Default Credentials.

    To connect via libcloud, don't use this function, use gcp_connect instead.  For
    Google Python API Client, see get_google_api_auth for how to connect.

    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)

    Google Cloud example:
      creds, params = get_google_cloud_credentials(module, scopes)
      pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
      pubsub_client.user_agent = 'ansible-pubsub-0.1'
      ...

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param scopes: list of scopes
    :type module: ``list`` of URIs

    :returns: A tuple containing (google authorized) credentials object and
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    scopes = [] if scopes is None else scopes

    if not HAS_GOOGLE_AUTH:
        module.fail_json(msg='Please install google-auth.')

    conn_params = _get_gcp_credentials(module,
                                       require_valid_json=True,
                                       check_libcloud=False)
    try:
        if conn_params['credentials_file']:
            # An explicit service-account key file was provided.
            credentials = service_account.Credentials.from_service_account_file(
                conn_params['credentials_file'])
            if scopes:
                credentials = credentials.with_scopes(scopes)
        else:
            # Fall back to Application Default Credentials; ADC may also
            # resolve the project id, which then overrides conn_params.
            (credentials, project_id) = google.auth.default(
                scopes=scopes)
            if project_id is not None:
                conn_params['project_id'] = project_id

        return (credentials, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # Reached only if fail_json returns (e.g. stubbed in tests).
        return (None, None)
+
+
def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
    """
    Authentication for use with google-python-api-client.

    Function calls get_google_cloud_credentials, which attempts to assemble the credentials
    from various locations.  Next it attempts to authenticate with Google.

    This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.

    For libcloud, don't use this function, use gcp_connect instead.  For Google Cloud, See
    get_google_cloud_credentials for how to connect.

    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)

    Google API example:
      http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
      service = build('myservice', 'v1', http=http_auth)
      ...

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param scopes: list of scopes
    :type scopes: ``list`` of URIs

    :param user_agent_product: User agent product.  eg: 'ansible-python-api'
    :type user_agent_product: ``str``

    :param user_agent_version: Version string to append to product.  eg: 'NA' or '0.1'
    :type user_agent_version: ``str``

    :returns: A tuple containing (google authorized) httplib2 request object and a
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    scopes = [] if scopes is None else scopes

    if not HAS_GOOGLE_API_LIB:
        module.fail_json(msg="Please install google-api-python-client library")
    if not scopes:
        # Default to full cloud-platform access when none was requested.
        scopes = GCP_DEFAULT_SCOPES
    try:
        (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
        # Brand outgoing requests with the caller's product/version.
        http = set_user_agent(Http(), '%s-%s' %
                              (user_agent_product, user_agent_version))
        http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)

        return (http_auth, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # Reached only if fail_json returns (e.g. stubbed in tests).
        return (None, None)
+
+
def get_google_api_client(module, service, user_agent_product, user_agent_version,
                          scopes=None, api_version='v1'):
    """
    Get the discovery-based python client. Use when a cloud client is not available.

    client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
                                   user_agent_version=USER_AGENT_VERSION)

    :returns: A tuple containing the authorized client to the specified service and a
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    # Default to the broad cloud-platform scope when none was requested.
    effective_scopes = scopes or GCP_DEFAULT_SCOPES

    http_auth, conn_params = get_google_api_auth(module, scopes=effective_scopes,
                                                 user_agent_product=user_agent_product,
                                                 user_agent_version=user_agent_version)
    return (build(service, api_version, http=http_auth), conn_params)
+
+
def check_min_pkg_version(pkg_name, minimum_version):
    """Return True when the installed version of pkg_name is >= minimum_version.

    The original docstring stated this comparison backwards; the code has
    always checked installed >= minimum.

    :param pkg_name: distribution name as known to pkg_resources.
    :param minimum_version: lowest acceptable version string, e.g. '0.17.0'.
    :returns: True if the package is installed and satisfies the minimum;
        False if it is missing, its metadata is unreadable, or the version
        machinery itself is unavailable.
    """
    try:
        # Imported lazily so environments without setuptools/distutils
        # degrade to False instead of crashing at call time.
        from pkg_resources import get_distribution
        from distutils.version import LooseVersion as _LooseVersion
        installed_version = get_distribution(pkg_name).version
        # Compare version-to-version rather than LooseVersion-to-str, which
        # relied on implicit coercion inside distutils.
        return _LooseVersion(installed_version) >= _LooseVersion(minimum_version)
    except Exception:
        return False
+
+
def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    # Include the active traceback so callers get full failure context.
    detail = traceback.format_exc()
    return 'Unexpected response: (%s). Detail: %s' % (str(error), detail)
+
+
def get_valid_location(module, driver, location, location_type='zone'):
    """Resolve a zone/region name via the libcloud driver.

    Fails the module (with a pointer to the public location list) when the
    driver does not recognise the name.
    """
    lookup = driver.ex_get_zone if location_type == 'zone' else driver.ex_get_region
    found = lookup(location)
    if found is not None:
        return found
    link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
    module.fail_json(msg=('%s %s is invalid. Please see the list of '
                          'available %s at %s' % (
                              location_type, location, location_type, link)),
                     changed=False)
+
+
+def check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+ :return True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if 'required' in d and d['required'] is True:
+ raise ValueError(("%s is required and must be of type: %s" %
+ (d['name'], str(d['type']))))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
+ d['name'], str(d['type']), params[d['name']],
+ type(params[d['name']]))))
+ if 'values' in d:
+ if params[d['name']] not in d['values']:
+ raise ValueError(("%s must be one of: %s" % (
+ d['name'], ','.join(d['values']))))
+ if isinstance(params[d['name']], int):
+ if 'min' in d:
+ if params[d['name']] < d['min']:
+ raise ValueError(("%s must be greater than or equal to: %s" % (
+ d['name'], d['min'])))
+ if 'max' in d:
+ if params[d['name']] > d['max']:
+ raise ValueError("%s must be less than or equal to: %s" % (
+ d['name'], d['max']))
+ return True
+
+
+class GCPUtils(object):
+ """
+ Helper utilities for GCP.
+ """
+
+ @staticmethod
+ def underscore_to_camel(txt):
+ return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
+
+ @staticmethod
+ def remove_non_gcp_params(params):
+ """
+ Remove params if found.
+ """
+ params_to_remove = ['state']
+ for p in params_to_remove:
+ if p in params:
+ del params[p]
+
+ return params
+
+ @staticmethod
+ def params_to_gcp_dict(params, resource_name=None):
+ """
+ Recursively convert ansible params to GCP Params.
+
+ Keys are converted from snake to camelCase
+ ex: default_service to defaultService
+
+ Handles lists, dicts and strings
+
+ special provision for the resource name
+ """
+ if not isinstance(params, dict):
+ return params
+ gcp_dict = {}
+ params = GCPUtils.remove_non_gcp_params(params)
+ for k, v in params.items():
+ gcp_key = GCPUtils.underscore_to_camel(k)
+ if isinstance(v, dict):
+ retval = GCPUtils.params_to_gcp_dict(v)
+ gcp_dict[gcp_key] = retval
+ elif isinstance(v, list):
+ gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
+ else:
+ if resource_name and k == resource_name:
+ gcp_dict['name'] = v
+ else:
+ gcp_dict[gcp_key] = v
+ return gcp_dict
+
    @staticmethod
    def execute_api_client_req(req, client=None, raw=True,
                               operation_timeout=180, poll_interval=5,
                               raise_404=True):
        """
        General python api client interaction function.

        For use with google-api-python-client, or clients created
        with get_google_api_client function.
        Not for use with Google Cloud client libraries.

        For long-running operations, we make an immediate query and then
        sleep poll_interval before re-querying.  After the request is done
        we rebuild the request with a get method and return the result.

        :param req: prepared request object exposing .execute().
        :param client: discovery client; needed when raw=False and the
            response turns out to be a long-running operation.
        :param raw: when True, return the response exactly as received.
        :param operation_timeout: seconds to wait for an operation to finish.
        :param poll_interval: seconds between operation status polls.
        :param raise_404: when False, a 404 HttpError yields None instead of
            propagating.
        """
        try:
            resp = req.execute()

            # Empty body (e.g. some delete responses): nothing to process.
            if not resp:
                return None

            if raw:
                return resp

            # Long-running compute operation: poll until DONE, then fetch
            # the resulting resource.
            if resp['kind'] == 'compute#operation':
                resp = GCPUtils.execute_api_client_operation_req(req, resp,
                                                                 client,
                                                                 operation_timeout,
                                                                 poll_interval)

            # List responses wrap the payload in 'items'; unwrap for callers.
            if 'items' in resp:
                return resp['items']

            return resp
        except HttpError as h:
            # Note: 404s can be generated (incorrectly) for dependent
            # resources not existing.  We let the caller determine if
            # they want 404s raised for their invocation.
            if h.resp.status == 404 and not raise_404:
                return None
            else:
                raise
        except Exception:
            raise
+
    @staticmethod
    def execute_api_client_operation_req(orig_req, op_resp, client,
                                         operation_timeout=180, poll_interval=5):
        """
        Poll an operation for a result.

        :param orig_req: the request that produced the operation.
        :param op_resp: the operation resource returned for that request.
        :param client: discovery client used to poll and re-fetch.
        :param operation_timeout: seconds before giving up.
        :param poll_interval: seconds between status polls.
        :returns: True for deletes; otherwise the resulting resource
            (or list of resources) re-fetched after completion.
        :raises GCPOperationTimeoutError: when the operation does not
            complete within operation_timeout.
        """
        # Recover project/resource context from the original request.
        parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
        project_id = parsed_url['project']
        resource_name = GCPUtils.get_gcp_resource_from_methodId(
            orig_req.methodId)
        resource = GCPUtils.build_resource_from_name(client, resource_name)

        start_time = time.time()

        complete = False
        attempts = 1
        while not complete:
            if start_time + operation_timeout >= time.time():
                # Still within the deadline: poll the operation status.
                # NOTE(review): only globalOperations is polled here; zonal
                # or regional operations would not be found -- confirm the
                # callers only pass global operations.
                op_req = client.globalOperations().get(
                    project=project_id, operation=op_resp['name'])
                op_resp = op_req.execute()
                if op_resp['status'] != 'DONE':
                    time.sleep(poll_interval)
                    attempts += 1
                else:
                    complete = True
                    if op_resp['operationType'] == 'delete':
                        # don't wait for the delete
                        return True
                    elif op_resp['operationType'] in ['insert', 'update', 'patch']:
                        # TODO(supertom): Isolate 'build-new-request' stuff.
                        resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
                            resource_name)
                        # Inserts name the new entity only in targetLink.
                        if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
                            parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
                                'entity_name']
                        args = {'project': project_id,
                                resource_name_singular: parsed_url['entity_name']}
                        new_req = resource.get(**args)
                        resp = new_req.execute()
                        return resp
                    else:
                        # assuming multiple entities, do a list call.
                        new_req = resource.list(project=project_id)
                        resp = new_req.execute()
                        return resp
            else:
                # operation didn't complete on time.
                raise GCPOperationTimeoutError("Operation timed out: %s" % (
                    op_resp['targetLink']))
+
+ @staticmethod
+ def build_resource_from_name(client, resource_name):
+ try:
+ method = getattr(client, resource_name)
+ return method()
+ except AttributeError:
+ raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
+ client))
+
+ @staticmethod
+ def get_gcp_resource_from_methodId(methodId):
+ try:
+ parts = methodId.split('.')
+ if len(parts) != 3:
+ return None
+ else:
+ return parts[1]
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def get_entity_name_from_resource_name(resource_name):
+ if not resource_name:
+ return None
+
+ try:
+ # Chop off global or region prefixes
+ if resource_name.startswith('global'):
+ resource_name = resource_name.replace('global', '')
+ elif resource_name.startswith('regional'):
+ resource_name = resource_name.replace('region', '')
+
+ # ensure we have a lower case first letter
+ resource_name = resource_name[0].lower() + resource_name[1:]
+
+ if resource_name[-3:] == 'ies':
+ return resource_name.replace(
+ resource_name[-3:], 'y')
+ if resource_name[-1] == 's':
+ return resource_name[:-1]
+
+ return resource_name
+
+ except AttributeError:
+ return None
+
    @staticmethod
    def parse_gcp_url(url):
        """
        Parse GCP urls and return dict of parts.

        Supported URL structures:
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME

        :param url: GCP-generated URL, such as a selflink or resource location.
        :type url: ``str``

        :return: dictionary of parts. Includes standard components of urlparse, plus
                 GCP-specific 'service', 'api_version', 'project' and
                 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
                 and 'method_name', if applicable.
        :rtype: ``dict``

        :raises GCPInvalidURLError: if the path lacks a 'projects' segment
            or is too short to name a resource.
        """

        p = urlparse.urlparse(url)
        if not p:
            return None
        else:
            # we add extra items such as
            # zone, region and resource_name
            url_parts = {}
            url_parts['scheme'] = p.scheme
            url_parts['host'] = p.netloc
            url_parts['path'] = p.path
            # strip a single leading slash so path splitting is uniform
            if p.path.find('/') == 0:
                url_parts['path'] = p.path[1:]
            url_parts['params'] = p.params
            url_parts['fragment'] = p.fragment
            url_parts['query'] = p.query
            url_parts['project'] = None
            url_parts['service'] = None
            url_parts['api_version'] = None

            path_parts = url_parts['path'].split('/')
            url_parts['service'] = path_parts[0]
            url_parts['api_version'] = path_parts[1]
            if path_parts[2] == 'projects':
                url_parts['project'] = path_parts[3]
            else:
                # invalid URL
                raise GCPInvalidURLError('unable to parse: %s' % url)

            # Branch 1: global resources ('.../global/RESOURCE[/ENTITY[/METHOD]]')
            if 'global' in path_parts:
                url_parts['global'] = True
                idx = path_parts.index('global')
                if len(path_parts) - idx == 4:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]
                    url_parts['method_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]

                if len(path_parts) - idx == 2:
                    url_parts['resource_name'] = path_parts[idx + 1]

                if len(path_parts) - idx < 2:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            # Branch 2: regional/zonal resources
            # ('.../regions/REGION/...' or '.../zones/ZONE/...')
            elif 'regions' in path_parts or 'zones' in path_parts:
                idx = -1
                if 'regions' in path_parts:
                    idx = path_parts.index('regions')
                    url_parts['region'] = path_parts[idx + 1]
                else:
                    idx = path_parts.index('zones')
                    url_parts['zone'] = path_parts[idx + 1]

                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            # Branch 3: project-level resources with no location segment
            else:
                # no location in URL.
                idx = path_parts.index('projects')
                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            return url_parts
+
+ @staticmethod
+ def build_googleapi_url(project, api_version='v1', service='compute'):
+ return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
+
+ @staticmethod
+ def filter_gcp_fields(params, excluded_fields=None):
+ new_params = {}
+ if not excluded_fields:
+ excluded_fields = ['creationTimestamp', 'id', 'kind',
+ 'selfLink', 'fingerprint', 'description']
+
+ if isinstance(params, list):
+ new_params = [GCPUtils.filter_gcp_fields(
+ x, excluded_fields) for x in params]
+ elif isinstance(params, dict):
+ for k in params.keys():
+ if k not in excluded_fields:
+ new_params[k] = GCPUtils.filter_gcp_fields(
+ params[k], excluded_fields)
+ else:
+ new_params = params
+
+ return new_params
+
+ @staticmethod
+ def are_params_equal(p1, p2):
+ """
+ Check if two params dicts are equal.
+ TODO(supertom): need a way to filter out URLs, or they need to be built
+ """
+ filtered_p1 = GCPUtils.filter_gcp_fields(p1)
+ filtered_p2 = GCPUtils.filter_gcp_fields(p2)
+ if filtered_p1 != filtered_p2:
+ return False
+ return True
+
+
class GCPError(Exception):
    """Base exception for errors raised by this GCP module_utils."""
    pass
+
+
class GCPOperationTimeoutError(GCPError):
    """Raised when a GCP operation does not finish within the timeout."""
    pass
+
+
class GCPInvalidURLError(GCPError):
    """Raised when a GCP URL cannot be parsed into its components."""
    pass
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py
new file mode 100644
index 00000000..8344c251
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: This module manages objects/buckets in Google Cloud Storage.
+description:
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+ - Keyname of the object inside the bucket. Can be also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+ - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+ - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only
+ available when public-read is the acl for the object.
+ aliases: [expiry]
+ mode:
+ type: str
+ description:
+ - Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and
+ delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use else where in your playbook
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.google.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.google.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.google.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.google.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.google.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
def grant_check(module, gs, obj):
    """Ensure the requested canned ACL grant is present on *obj*.

    Inspects the object's/bucket's current ACL for the grant implied by
    the module's ``permission`` parameter and, if missing, applies the
    canned ACL and exits the module with ``changed=True``.
    Returns True when no change was needed.
    """
    try:
        acp = obj.get_acl()
        if module.params.get('permission') == 'public-read':
            grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
            if not grant:
                obj.set_acl('public-read')
                # Fix: result message typo ("as been" -> "has been").
                module.exit_json(changed=True, result="The objects permission has been set to public-read")
        if module.params.get('permission') == 'authenticated-read':
            grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
            if not grant:
                obj.set_acl('authenticated-read')
                module.exit_json(changed=True, result="The objects permission has been set to authenticated-read")
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    return True
+
+
def key_check(module, gs, bucket, obj):
    """Return True if *obj* exists in *bucket* (re-asserting its ACL)."""
    try:
        found = gs.lookup(bucket).get_key(obj)
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    if not found:
        return False
    grant_check(module, gs, found)
    return True
+
+
def keysum(module, gs, bucket, obj):
    """Return the remote object's md5 (from its etag), or None if absent.

    Fails the module for multipart uploads, whose etag is not an md5.
    """
    remote_key = gs.lookup(bucket).get_key(obj)
    if not remote_key:
        return None
    md5_remote = remote_key.etag[1:-1]
    if '-' in md5_remote:  # multipart etag is not an md5
        module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
    return md5_remote
+
+
def bucket_check(module, gs, bucket):
    """Return True if *bucket* exists (re-asserting its ACL)."""
    try:
        found = gs.lookup(bucket)
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    if not found:
        return False
    grant_check(module, gs, found)
    return True
+
+
def create_bucket(module, gs, bucket):
    """Create *bucket* with the requested ACL, region and versioning.

    Returns True on success (None if the connection returned a falsy
    bucket object, matching historic behaviour).
    """
    try:
        new_bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
        new_bucket.set_acl(module.params.get('permission'))
        new_bucket.configure_versioning(module.params.get('versioning'))
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    if new_bucket:
        return True
+
+
def delete_bucket(module, gs, bucket):
    """Delete every key in *bucket*, then the bucket itself; True on success."""
    try:
        target = gs.lookup(bucket)
        for key in target.list():
            target.delete_key(key.name)
        target.delete()
        return True
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
+
+
def delete_key(module, gs, bucket, obj):
    """Delete *obj* from *bucket* and exit the module with changed=True."""
    try:
        gs.lookup(bucket).delete_key(obj)
        module.exit_json(msg="Object deleted from bucket ", changed=True)
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
+
+
def create_dirkey(module, gs, bucket, obj):
    """Create an empty key (a "virtual directory") *obj* in *bucket* and exit."""
    try:
        container = gs.lookup(bucket)
        container.new_key(obj).set_contents_from_string('')
        module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, container.name), changed=True)
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
+
+
def path_check(path):
    """Return True if *path* exists on the local filesystem."""
    # Idiom: os.path.exists already returns a bool; the explicit
    # if/else branch was redundant.
    return os.path.exists(path)
+
+
def transform_headers(headers):
    """
    Coerce every header value to `str` in place and return the dict.

    Boto url-encodes values unless we convert the value to `str`, so doing
    this prevents 'max-age=100000' from being converted to "max-age%3D100000".

    :param headers: Headers to convert
    :type headers: dict
    :rtype: dict

    """

    for key in headers:
        headers[key] = str(headers[key])
    return headers
+
+
def upload_gsfile(module, gs, bucket, obj, src, expiry):
    """Upload local file *src* to *bucket*/*obj*, apply the requested ACL,
    and exit the module with a signed URL valid for *expiry* seconds."""
    try:
        dest_key = gs.lookup(bucket).new_key(obj)
        dest_key.set_contents_from_filename(
            filename=src,
            headers=transform_headers(module.params.get('headers'))
        )
        dest_key.set_acl(module.params.get('permission'))
        module.exit_json(msg="PUT operation complete", url=dest_key.generate_url(expiry), changed=True)
    except gs.provider.storage_copy_error as e:
        module.fail_json(msg=str(e))
+
+
def download_gsfile(module, gs, bucket, obj, dest):
    """Fetch *bucket*/*obj* into local file *dest* and exit the module."""
    try:
        source_key = gs.lookup(bucket).lookup(obj)
        source_key.get_contents_to_filename(dest)
        module.exit_json(msg="GET operation complete", changed=True)
    except gs.provider.storage_copy_error as e:
        module.fail_json(msg=str(e))
+
+
def download_gsstr(module, gs, bucket, obj):
    """Exit the module returning the contents of *bucket*/*obj* as a string."""
    try:
        source_key = gs.lookup(bucket).lookup(obj)
        module.exit_json(msg="GET operation complete", contents=source_key.get_contents_as_string(), changed=True)
    except gs.provider.storage_copy_error as e:
        module.fail_json(msg=str(e))
+
+
def get_download_url(module, gs, bucket, obj, expiry):
    """Exit the module returning a signed URL for *bucket*/*obj*,
    valid for *expiry* seconds."""
    try:
        target_key = gs.lookup(bucket).lookup(obj)
        signed_url = target_key.generate_url(expiry)
        module.exit_json(msg="Download url:", url=signed_url, expiration=expiry, changed=True)
    except gs.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
+
+
def handle_get(module, gs, bucket, obj, overwrite, dest):
    # Download bucket/obj to dest unless the local file already matches
    # the remote md5; refuse to clobber a differing local file unless
    # overwrite was requested.
    md5_remote = keysum(module, gs, bucket, obj)
    md5_local = module.md5(dest)
    if md5_local == md5_remote:
        module.exit_json(changed=False)
    if md5_local != md5_remote and not overwrite:
        # NOTE(review): exits via exit_json(failed=True) rather than
        # fail_json — preserved for backward compatibility.
        module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
    else:
        download_gsfile(module, gs, bucket, obj, dest)
+
+
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
    # Upload src to bucket/obj, creating the bucket if needed and
    # skipping the upload when the remote md5 already matches the local file.
    # Lets check to see if bucket exists to get ground truth.
    bucket_rc = bucket_check(module, gs, bucket)
    key_rc = key_check(module, gs, bucket, obj)

    # Lets check key state. Does it exist and if it does, compute the etag md5sum.
    if bucket_rc and key_rc:
        md5_remote = keysum(module, gs, bucket, obj)
        md5_local = module.md5(src)
        if md5_local == md5_remote:
            module.exit_json(msg="Local and remote object are identical", changed=False)
        if md5_local != md5_remote and not overwrite:
            # NOTE(review): exits via exit_json(failed=True) rather than
            # fail_json — preserved for backward compatibility.
            module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
        else:
            upload_gsfile(module, gs, bucket, obj, src, expiration)

    if not bucket_rc:
        create_bucket(module, gs, bucket)
        upload_gsfile(module, gs, bucket, obj, src, expiration)

    # If bucket exists but key doesn't, just upload.
    if bucket_rc and not key_rc:
        upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+
def handle_delete(module, gs, bucket, obj):
    # Delete the whole bucket (when no obj is given) or a single object.
    if bucket and not obj:
        if bucket_check(module, gs, bucket):
            module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
        else:
            module.exit_json(msg="Bucket does not exist.", changed=False)
    if bucket and obj:
        if bucket_check(module, gs, bucket):
            if key_check(module, gs, bucket, obj):
                # NOTE(review): delete_key() itself calls module.exit_json,
                # so this outer exit_json appears unreachable in practice.
                module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
            else:
                module.exit_json(msg="Object does not exist.", changed=False)
        else:
            module.exit_json(msg="Bucket does not exist.", changed=False)
    else:
        module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
def handle_create(module, gs, bucket, obj):
    # Create a bucket, or a "virtual directory" key inside a bucket.
    if bucket and not obj:
        if bucket_check(module, gs, bucket):
            module.exit_json(msg="Bucket already exists.", changed=False)
        else:
            module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
    if bucket and obj:
        # Directory keys are always stored with a trailing slash.
        if obj.endswith('/'):
            dirobj = obj
        else:
            dirobj = obj + "/"

        if bucket_check(module, gs, bucket):
            if key_check(module, gs, bucket, dirobj):
                module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
            else:
                create_dirkey(module, gs, bucket, dirobj)
        else:
            # Bucket missing: create it first, then the directory key.
            create_bucket(module, gs, bucket)
            create_dirkey(module, gs, bucket, dirobj)
+
+
def main():
    """Entry point: dispatch to the requested GCS operation (mode)."""
    module = AnsibleModule(
        argument_spec=dict(
            bucket=dict(required=True),
            object=dict(default=None, type='path'),
            src=dict(default=None),
            dest=dict(default=None, type='path'),
            expiration=dict(type='int', default=600, aliases=['expiry']),
            mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
            permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
            headers=dict(type='dict', default={}),
            gs_secret_key=dict(no_log=True, required=True),
            gs_access_key=dict(required=True),
            overwrite=dict(default=True, type='bool', aliases=['force']),
            region=dict(default='US', type='str'),
            versioning=dict(default=False, type='bool')
        ),
    )

    if not HAS_BOTO:
        module.fail_json(msg='`boto` 2.9+ is required for this module. Try: pip install `boto` --upgrade')

    bucket = module.params.get('bucket')
    obj = module.params.get('object')
    src = module.params.get('src')
    dest = module.params.get('dest')
    mode = module.params.get('mode')
    expiry = module.params.get('expiration')
    gs_secret_key = module.params.get('gs_secret_key')
    gs_access_key = module.params.get('gs_access_key')
    overwrite = module.params.get('overwrite')

    # BUG FIX: the original checks tested `not object` — the *builtin*
    # `object` class, which is always truthy — so a missing `object`
    # parameter was never caught. Test the local `obj` instead.
    if mode == 'put':
        if not src or not obj:
            module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
    if mode == 'get':
        if not dest or not obj:
            module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")

    try:
        gs = boto.connect_gs(gs_access_key, gs_secret_key)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    if mode == 'get':
        if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
            module.fail_json(msg="Target bucket/key cannot be found", failed=True)
        if not path_check(dest):
            download_gsfile(module, gs, bucket, obj, dest)
        else:
            handle_get(module, gs, bucket, obj, overwrite, dest)

    if mode == 'put':
        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist", failed=True)
        handle_put(module, gs, bucket, obj, overwrite, src, expiry)

    # Support for deleting an object if we have both params.
    if mode == 'delete':
        handle_delete(module, gs, bucket, obj)

    if mode == 'create':
        handle_create(module, gs, bucket, obj)

    if mode == 'get_url':
        if bucket and obj:
            if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
                get_download_url(module, gs, bucket, obj, expiry)
            else:
                module.fail_json(msg="Key/Bucket does not exist", failed=True)
        else:
            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)

    # --------------------------- Get the String contents of an Object -------------------------
    if mode == 'get_str':
        if bucket and obj:
            if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
                download_gsstr(module, gs, bucket, obj)
            else:
                module.fail_json(msg="Key/Bucket does not exist", failed=True)
        else:
            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py
new file mode 100644
index 00000000..a9ad45e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.google.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.google.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: us-east1
+ state: present
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+ description: Which region an address belongs.
+ returned: always
+ type: str
+ sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import gcp_connect
+
+
def get_address(gce, name, region):
    """
    Get an Address from GCE.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param name: Name of the Address.
    :type name: ``str``

    :param region: Region of the Address ('global' for global addresses).
    :type region: ``str``

    :return: A GCEAddress object or None.
    :rtype: :class: `GCEAddress` or None
    """
    try:
        return gce.ex_get_address(name=name, region=region)

    except ResourceNotFoundError:
        # Address does not exist; caller decides whether to create.
        return None
+
+
def create_address(gce, params):
    """
    Create a new Address.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param params: Dictionary of parameters needed by the module
                   (must contain 'name' and 'region').
    :type params: ``dict``

    :return: Tuple with changed status and the reserved IP address.
    :rtype: tuple in the format of (bool, str)
    """
    new_address = gce.ex_create_address(
        name=params['name'], region=params['region'])

    if not new_address:
        return (False, [])

    return (True, new_address.address)
+
+
def delete_address(address):
    """
    Delete an Address.

    :param address: The Address object to destroy.
    :type address: :class: `GCEAddress`

    :return: Tuple with changed status and the released IP address.
    :rtype: tuple in the format of (bool, str)
    """
    if not address.destroy():
        return (False, [])
    return (True, address.address)
+
+
def main():
    # Module entry point: reserve (state=present) or release (state=absent)
    # a GCE external IP address, global or regional.
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        region=dict(required=True),
        service_account_email=dict(),
        service_account_permissions=dict(type='list'),
        pem_file=dict(type='path'),
        credentials_file=dict(type='path'),
        project_id=dict(), ), )

    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (+0.19) required for this module.')

    # Build an authenticated libcloud GCE driver from module credentials.
    gce = gcp_connect(module, Provider.GCE, get_driver,
                      USER_AGENT_PRODUCT, USER_AGENT_VERSION)

    params = {}
    params['state'] = module.params.get('state')
    params['name'] = module.params.get('name')
    params['region'] = module.params.get('region')

    changed = False
    json_output = {'state': params['state']}
    address = get_address(gce, params['name'], region=params['region'])

    if params['state'] == 'absent':
        if not address:
            # Doesn't exist in GCE, and state==absent.
            changed = False
            module.fail_json(
                msg="Cannot delete unknown address: %s" %
                (params['name']))
        else:
            # Delete
            (changed, json_output['address']) = delete_address(address)
    else:
        if not address:
            # Create
            (changed, json_output['address']) = create_address(gce,
                                                               params)
        else:
            # Already exists; report its IP without changing anything.
            changed = False
            json_output['address'] = address.address

    json_output['changed'] = changed
    json_output.update(params)
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py
new file mode 100644
index 00000000..9e53e1e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+ - This module can create and delete GCE private images from gzipped
+ compressed tarball containing raw disk data or from existing detached
+ disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.google.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.google.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.google.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.google.gce_img:
+ name: test-image
+ state: absent
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
def create_image(gce, name, module):
    """Create an image with the specified name.

    The image source (module param 'source') may be:
      * an https:// Google Cloud Storage URI,
      * a gs:// Google Cloud Storage URI (rewritten to https, since
        libcloud only accepts https URIs), or
      * the name of an existing disk in the module's 'zone'.

    Returns True when the image was created, False when an image with
    that name already exists; any other API error fails the module.
    """
    source = module.params.get('source')
    zone = module.params.get('zone')
    desc = module.params.get('description')
    timeout = module.params.get('timeout')
    family = module.params.get('family')

    if not source:
        module.fail_json(msg='Must supply a source', changed=False)

    if source.startswith(GCS_URI):
        # source is already an https Google Cloud Storage URI
        volume = source
    elif source.startswith('gs://'):
        # libcloud only accepts https URIs; rewrite just the scheme prefix.
        # (The previous str.replace() call would also have rewritten any
        # later occurrence of 'gs://' inside the path.)
        volume = GCS_URI + source[len('gs://'):]
    else:
        # Otherwise treat source as the name of a disk in `zone`.
        try:
            volume = gce.ex_get_volume(source, zone)
        except ResourceNotFoundError:
            module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
                             changed=False)
        except GoogleBaseError as e:
            module.fail_json(msg=str(e), changed=False)

    gce_extra_args = {}
    if family is not None:
        gce_extra_args['family'] = family

    # Temporarily raise the connection timeout: image creation can be slow.
    old_timeout = gce.connection.timeout
    try:
        gce.connection.timeout = timeout
        gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
        return True
    except ResourceExistsError:
        # Idempotent: an image that already exists is not an error.
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    finally:
        gce.connection.timeout = old_timeout
+
+
def delete_image(gce, name, module):
    """Remove the image called *name*.

    Returns True when the image was deleted, False when no such image
    exists; any other API error fails the module.
    """
    try:
        gce.ex_delete_image(name)
    except ResourceNotFoundError:
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py
new file mode 100644
index 00000000..ec436b42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy instance templates of Compute Engine of GCP.
+description:
+ - Creates or destroy Google instance templates
+ of Compute Engine of Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+      - The image to use to create the instance.
+        Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+ used. If C(None), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+        formatted disks[] structure. Case sensitive.
+ see U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+ - Region that subnetwork resides in. (Required for subnetwork to successfully complete)
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.google.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.google.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.google.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.google.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
def get_info(inst):
    """Summarize an instance template as a plain dict (name plus its
    provider-specific 'extra' metadata)."""
    summary = {
        'name': inst.name,
        'extra': inst.extra,
    }
    return summary
+
+
def create_instance_template(module, gce):
    """Create a GCE instance template, or report the existing one.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    Returns:
        (changed, json_data, name) where json_data is the get_info() dict
        for the (new or pre-existing) template. If the template already
        exists it is returned unchanged (changed=False); its configuration
        is NOT compared against the requested parameters.
    """
    # get info from module
    name = module.params.get('name')
    size = module.params.get('size')
    source = module.params.get('source')
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    disk_type = module.params.get('disk_type')
    disk_auto_delete = module.params.get('disk_auto_delete')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    subnetwork_region = module.params.get('subnetwork_region')
    can_ip_forward = module.params.get('can_ip_forward')
    external_ip = module.params.get('external_ip')
    service_account_permissions = module.params.get(
        'service_account_permissions')
    service_account_email = module.params.get('service_account_email')
    # NOTE(review): 'on_host_maintenance' is not declared in main()'s
    # argument_spec, so this is always None here — confirm whether the
    # option was meant to be exposed.
    on_host_maintenance = module.params.get('on_host_maintenance')
    automatic_restart = module.params.get('automatic_restart')
    preemptible = module.params.get('preemptible')
    tags = module.params.get('tags')
    metadata = module.params.get('metadata')
    description = module.params.get('description')
    disks_gce_struct = module.params.get('disks_gce_struct')
    changed = False

    # args of ex_create_instancetemplate
    # (defaults mirror the libcloud signature; overridden selectively below)
    gce_args = dict(
        name="instance",
        size="f1-micro",
        source=None,
        image=None,
        disk_type='pd-standard',
        disk_auto_delete=True,
        network='default',
        subnetwork=None,
        can_ip_forward=None,
        external_ip='ephemeral',
        service_accounts=None,
        on_host_maintenance=None,
        automatic_restart=None,
        preemptible=None,
        tags=None,
        metadata=None,
        description=None,
        disks_gce_struct=None,
        nic_gce_struct=None
    )

    gce_args['name'] = name
    gce_args['size'] = size

    if source is not None:
        gce_args['source'] = source

    # Boot image selection: explicit image wins over image_family; with
    # neither, fall back to the literal image name "debian-8".
    if image:
        gce_args['image'] = image
    else:
        if image_family:
            image = gce.ex_get_image_from_family(image_family)
            gce_args['image'] = image
        else:
            gce_args['image'] = "debian-8"

    gce_args['disk_type'] = disk_type
    gce_args['disk_auto_delete'] = disk_auto_delete

    # Resolve the network name to a libcloud network object.
    gce_network = gce.ex_get_network(network)
    gce_args['network'] = gce_network

    if subnetwork is not None:
        gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)

    if can_ip_forward is not None:
        gce_args['can_ip_forward'] = can_ip_forward

    # external_ip: 'ephemeral' passes through, 'none' disables the external
    # address; anything else is first tried as a reserved-address lookup.
    if external_ip == "ephemeral":
        instance_external_ip = external_ip
    elif external_ip == "none":
        instance_external_ip = None
    else:
        try:
            instance_external_ip = gce.ex_get_address(external_ip)
        except GoogleBaseError as err:
            # Lookup failed: best-effort fallback, pass the raw value on and
            # let the create call decide whether it is acceptable.
            instance_external_ip = external_ip
    gce_args['external_ip'] = instance_external_ip

    # Validate requested scopes against the driver's known scope map, then
    # build the single service-account entry libcloud expects.
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP:
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        if service_account_email is not None:
            ex_sa_perms.append({'email': str(service_account_email)})
        else:
            ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
        gce_args['service_accounts'] = ex_sa_perms

    if on_host_maintenance is not None:
        gce_args['on_host_maintenance'] = on_host_maintenance

    if automatic_restart is not None:
        gce_args['automatic_restart'] = automatic_restart

    if preemptible is not None:
        gce_args['preemptible'] = preemptible

    if tags is not None:
        gce_args['tags'] = tags

    if disks_gce_struct is not None:
        gce_args['disks_gce_struct'] = disks_gce_struct

    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')

        # libcloud < 0.15 expects the raw GCE {'items': [...]} wire format;
        # newer versions take a plain dict and convert it themselves.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
        gce_args['metadata'] = metadata

    if description is not None:
        gce_args['description'] = description

    # Only create the template if it does not already exist (idempotent).
    instance = None
    try:
        instance = gce.ex_get_instancetemplate(name)
    except ResourceNotFoundError:
        try:
            instance = gce.ex_create_instancetemplate(**gce_args)
            changed = True
        except GoogleBaseError as err:
            module.fail_json(
                msg='Unexpected error attempting to create instance {0}, error: {1}'
                .format(
                    instance,
                    err.value
                )
            )

    if instance:
        json_data = get_info(instance)
    else:
        module.fail_json(msg="no instance template!")

    return (changed, json_data, name)
+
+
def delete_instance_template(module, gce):
    """Delete the instance template named in the module params, if present.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    Returns:
        (changed, json_data, name) — json_data is {} on success and a dict
        with 'msg'/'exception' details when the template was not found.
    """
    name = module.params.get('name')
    changed = False
    json_data = {}

    # Fetch the template; a lookup failure normally means it does not exist.
    instance = None
    try:
        instance = gce.ex_get_instancetemplate(name)
    except GoogleBaseError as e:
        # BUG FIX: this detail used to be built and then unconditionally
        # overwritten with {} before returning; keep it so the caller can
        # report why nothing was deleted.
        json_data = dict(msg='instance template not exists: %s' % to_native(e),
                         exception=traceback.format_exc())

    if instance is not None:
        if instance.destroy():
            changed = True
        else:
            module.fail_json(
                msg='instance template destroy failed'
            )
        json_data = {}

    return (changed, json_data, name)
+
+
def module_controller(module, gce):
    """Apply the requested 'state' and exit the module with the outcome.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    Exit:
        AnsibleModule object exit with json data.
    """
    state = module.params.get("state")
    json_output = dict()
    if state == "present":
        changed, output, dummy = create_instance_template(module, gce)
        json_output.update(changed=changed, msg=output)
    elif state == "absent":
        changed, output, dummy = delete_instance_template(module, gce)
        json_output.update(changed=changed, msg=output)

    module.exit_json(**json_output)
+
+
def check_if_system_state_would_be_changed(module, gce):
    """Predict (for check mode) whether applying the module would change GCE.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    Returns:
        (changed, output) — whether a change would occur, plus a human
        readable description of it.
    """
    state = module.params.get("state")
    name = module.params.get("name")

    try:
        gce.ex_get_instancetemplate(name)
        current_state = "present"
    except ResourceNotFoundError:
        # BUG FIX: a missing template is a valid answer in check mode, not a
        # fatal error. ResourceNotFoundError subclasses GoogleBaseError, so
        # the previous single handler aborted every 'would create' check.
        current_state = "absent"
    except GoogleBaseError as e:
        module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
                         exception=traceback.format_exc())

    changed = current_state != state

    if current_state == "absent":
        if changed:
            output = 'instance template {0} will be created'.format(name)
        else:
            output = 'nothing to do for instance template {0} '.format(name)
    else:
        if changed:
            output = 'instance template {0} will be destroyed'.format(name)
        else:
            output = 'nothing to do for instance template {0} '.format(name)

    return (changed, output)
+
+
def main():
    """Module entry point: parse arguments, connect to GCE and dispatch.

    Supports check mode: in check mode only the would-be state change is
    reported; nothing is created or destroyed.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
            name=dict(required=True, aliases=['base_name']),
            size=dict(default='f1-micro'),
            source=dict(),
            image=dict(),
            image_family=dict(default='debian-8'),
            disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
            disk_auto_delete=dict(type='bool', default=True),
            network=dict(default='default'),
            subnetwork=dict(),
            can_ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            automatic_restart=dict(type='bool', default=None),
            preemptible=dict(type='bool', default=None),
            tags=dict(type='list'),
            metadata=dict(),
            description=dict(),
            disks=dict(type='list'),
            nic_gce_struct=dict(type='list'),
            project_id=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            subnetwork_region=dict(),
            disks_gce_struct=dict(type='list')
        ),
        # 'source' (disk) and 'image' are alternative boot sources.
        mutually_exclusive=[['source', 'image']],
        required_one_of=[['image', 'image_family']],
        supports_check_mode=True
    )

    # Fail early if the runtime prerequisites are missing.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (0.17.0+) required for this module')

    try:
        gce = gce_connect(module)
    except GoogleBaseError as e:
        module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())

    if module.check_mode:
        # Check mode: only report what would change, make no API mutations.
        (changed, output) = check_if_system_state_would_be_changed(module, gce)
        module.exit_json(
            changed=changed,
            msg=output
        )
    else:
        module_controller(module, gce)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py
new file mode 100644
index 00000000..3eed2df2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for
+ the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+ - A list of labels (key/value pairs) to add or remove for the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+ description: List of labels that exist on the resource.
+ returned: Always.
+ type: dict
+ sample: [ { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' } ]
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
def _fetch_resource(client, module):
    """Look up the target resource and return its current labels metadata.

    Returns a (labelFingerprint, info_dict) tuple where info_dict carries
    resource_name / resource_url / resource_type / resource_location and
    the current 'labels' dict ({} when the resource has none). Fails the
    module on a malformed self_link or an unsupported resource type.
    """
    params = module.params
    if params['resource_url']:
        # Derive type/location/name from the resource's self_link URL.
        if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
            module.fail_json(
                msg='Invalid self_link url: %s' % params['resource_url'])
        else:
            # NOTE(review): slicing at element 8 assumes the canonical
            # .../compute/<ver>/projects/<proj>/[<loc-kind>/<loc>/]<type>/<name>
            # layout — confirm this for any new resource types added here.
            parts = params['resource_url'].split('/')[8:]
            if len(parts) == 2:
                # Global resources carry no location segment in the URL.
                resource_type, resource_name = parts
                resource_location = 'global'
            else:
                resource_location, resource_type, resource_name = parts
    else:
        # No URL: all three individual resource params are required.
        if not params['resource_type'] or not params['resource_location'] \
                or not params['resource_name']:
            module.fail_json(msg='Missing required resource params.')
        resource_type = params['resource_type'].lower()
        resource_name = params['resource_name'].lower()
        resource_location = params['resource_location'].lower()

    if resource_type not in KNOWN_RESOURCES:
        module.fail_json(msg='Unsupported resource_type: %s' % resource_type)

    # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
    # added to the v1 GCE API for more resources, some minor code work will
    # need to be added here.
    # Zonal resources (instances, disks) need the zone; snapshots and
    # images are addressed by name alone.
    if resource_type == 'instances':
        resource = client.instances().get(project=params['project_id'],
                                          zone=resource_location,
                                          instance=resource_name).execute()
    elif resource_type == 'disks':
        resource = client.disks().get(project=params['project_id'],
                                      zone=resource_location,
                                      disk=resource_name).execute()
    elif resource_type == 'snapshots':
        resource = client.snapshots().get(project=params['project_id'],
                                          snapshot=resource_name).execute()
    elif resource_type == 'images':
        resource = client.images().get(project=params['project_id'],
                                       image=resource_name).execute()
    else:
        module.fail_json(msg='Unsupported resource type: %s' % resource_type)

    return resource.get('labelFingerprint', ''), {
        'resource_name': resource.get('name'),
        'resource_url': resource.get('selfLink'),
        'resource_type': resource_type,
        'resource_location': resource_location,
        'labels': resource.get('labels', {})
    }
+
+
def _set_labels(client, new_labels, module, ri, fingerprint):
    """Issue a setLabels request replacing the resource's labels with *new_labels*.

    ri: resource info dict as returned by _fetch_resource().
    fingerprint: labelFingerprint from the last read — the API uses it for
    optimistic locking and rejects the write if the labels changed since.

    Returns (True, None) once the request has been issued (the async
    operation is not polled to completion — see the TODO below); an
    unsupported resource type fails the module instead.
    """
    params = module.params
    result = err = None
    labels = {
        'labels': new_labels,
        'labelFingerprint': fingerprint
    }

    # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
    # added to the v1 GCE API for more resources, some minor code work will
    # need to be added here.
    # Per-resource setLabels signatures differ: instances use zone +
    # 'instance'; the others take a 'resource' kwarg.
    if ri['resource_type'] == 'instances':
        req = client.instances().setLabels(project=params['project_id'],
                                           instance=ri['resource_name'],
                                           zone=ri['resource_location'],
                                           body=labels)
    elif ri['resource_type'] == 'disks':
        req = client.disks().setLabels(project=params['project_id'],
                                       zone=ri['resource_location'],
                                       resource=ri['resource_name'],
                                       body=labels)
    elif ri['resource_type'] == 'snapshots':
        req = client.snapshots().setLabels(project=params['project_id'],
                                           resource=ri['resource_name'],
                                           body=labels)
    elif ri['resource_type'] == 'images':
        req = client.images().setLabels(project=params['project_id'],
                                        resource=ri['resource_name'],
                                        body=labels)
    else:
        module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])

    # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
    # method to poll for the async request/operation to complete before
    # returning. However, during 'beta', we are in an odd state where
    # API requests must be sent to the 'compute/beta' API, but the python
    # client library only allows for *Operations.get() requests to be
    # sent to 'compute/v1' API. The response operation is in the 'beta'
    # API-scope, but the client library cannot find the operation (404).
    # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
    # return result, err
    result = req.execute()
    return True, err
+
+
def main():
    """Module entry point: reconcile GCE resource labels with the request."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['absent', 'present'], default='present'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(),
            credentials_file=dict(),
            labels=dict(required=False, type='dict', default={}),
            resource_url=dict(required=False, type='str'),
            resource_name=dict(required=False, type='str'),
            resource_location=dict(required=False, type='str'),
            resource_type=dict(required=False, type='str'),
            project_id=dict()
        ),
        required_together=[
            ['resource_name', 'resource_location', 'resource_type']
        ],
        mutually_exclusive=[
            ['resource_url', 'resource_name'],
            ['resource_url', 'resource_location'],
            ['resource_url', 'resource_type']
        ]
    )

    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")

    client, cparams = get_google_api_client(module, 'compute',
                                            user_agent_product=UA_PRODUCT,
                                            user_agent_version=UA_VERSION,
                                            api_version=GCE_API_VERSION)

    # Current labels plus the fingerprint required for optimistic locking.
    fingerprint, resource_info = _fetch_resource(client, module)
    new_labels = resource_info['labels'].copy()

    update_needed = False
    if module.params['state'] == 'absent':
        # Only remove a label when both key AND value match the request;
        # a value mismatch is treated as an error.
        for k, v in module.params['labels'].items():
            if k in new_labels:
                if new_labels[k] == v:
                    update_needed = True
                    new_labels.pop(k, None)
                else:
                    module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
    else:
        for k, v in module.params['labels'].items():
            # BUG FIX: also detect a changed *value* for an existing key.
            # The previous check ('k not in new_labels') silently ignored
            # updates to labels that already existed.
            if k not in new_labels or new_labels[k] != v:
                update_needed = True
                new_labels[k] = v

    changed = False
    json_output = {'state': module.params['state']}
    if update_needed:
        changed, err = _set_labels(client, new_labels, module, resource_info,
                                   fingerprint)
    json_output['changed'] = changed

    # TODO(erjohnso): probably want to re-fetch the resource to return the
    # new labelFingerprint, check that desired labels match updated labels.
    # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
    # GCPUtils feature to poll for the operation to be complete. For now,
    # we'll just update the output with what we have from the original
    # state of the resource.
    json_output.update(resource_info)
    json_output.update(module.params)

    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py
new file mode 100644
index 00000000..ff29b56d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+ the ansible module simplifies the configuration by following the
+ libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+ - the port (range) to forward, e.g. 80 or 8000-8888 defaults to all ports
+ members:
+ type: list
+ description:
+ - a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...]
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
def main():
    """Ansible entry point for the gce_lb module.

    Creates or deletes a GCE load balancer (forwarding rule plus target
    pool) and/or an HTTP health check, depending on the 'state' parameter.
    Exits via module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec=dict(
            httphealthcheck_name=dict(),
            httphealthcheck_port=dict(default=80, type='int'),
            httphealthcheck_path=dict(default='/'),
            httphealthcheck_interval=dict(default=5, type='int'),
            httphealthcheck_timeout=dict(default=5, type='int'),
            httphealthcheck_unhealthy_count=dict(default=2, type='int'),
            httphealthcheck_healthy_count=dict(default=2, type='int'),
            httphealthcheck_host=dict(),
            name=dict(),
            protocol=dict(default='tcp'),
            region=dict(),
            external_ip=dict(),
            port_range=dict(),
            members=dict(type='list'),
            state=dict(default='present'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')

    gce = gce_connect(module)

    httphealthcheck_name = module.params.get('httphealthcheck_name')
    httphealthcheck_port = module.params.get('httphealthcheck_port')
    httphealthcheck_path = module.params.get('httphealthcheck_path')
    httphealthcheck_interval = module.params.get('httphealthcheck_interval')
    httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
    httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
    httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
    httphealthcheck_host = module.params.get('httphealthcheck_host')
    name = module.params.get('name')
    protocol = module.params.get('protocol')
    region = module.params.get('region')
    external_ip = module.params.get('external_ip')
    port_range = module.params.get('port_range')
    members = module.params.get('members')
    state = module.params.get('state')

    try:
        # The LB driver wraps the already-authenticated compute driver.
        gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
        gcelb.connection.user_agent_append("%s/%s" % (
            USER_AGENT_PRODUCT, USER_AGENT_VERSION))
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    changed = False
    json_output = {'name': name, 'state': state}

    if not name and not httphealthcheck_name:
        module.fail_json(msg='Nothing to do, please specify a "name" ' + 'or "httphealthcheck_name" parameter', changed=False)

    if state in ['active', 'present']:
        # first, create the httphealthcheck if requested
        hc = None
        if httphealthcheck_name:
            json_output['httphealthcheck_name'] = httphealthcheck_name
            try:
                hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
                                                 host=httphealthcheck_host, path=httphealthcheck_path,
                                                 port=httphealthcheck_port,
                                                 interval=httphealthcheck_interval,
                                                 timeout=httphealthcheck_timeout,
                                                 unhealthy_threshold=httphealthcheck_unhealthy_count,
                                                 healthy_threshold=httphealthcheck_healthy_count)
                changed = True
            except ResourceExistsError:
                # Idempotent: reuse the existing health check unchanged.
                hc = gce.ex_get_healthcheck(httphealthcheck_name)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            if hc is not None:
                json_output['httphealthcheck_host'] = hc.extra['host']
                json_output['httphealthcheck_path'] = hc.path
                json_output['httphealthcheck_port'] = hc.port
                json_output['httphealthcheck_interval'] = hc.interval
                json_output['httphealthcheck_timeout'] = hc.timeout
                json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
                json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold

        # create the forwarding rule (and target pool under the hood)
        lb = None
        if name:
            if not region:
                module.fail_json(msg='Missing required region name',
                                 changed=False)
            nodes = []
            output_nodes = []
            json_output['name'] = name
            # members is a python list of 'zone/inst' strings
            if members:
                for node in members:
                    try:
                        zone, node_name = node.split('/')
                        nodes.append(gce.ex_get_node(node_name, zone))
                        output_nodes.append(node)
                    except Exception:
                        # skip nodes that are badly formatted or don't exist
                        pass
            try:
                if hc is not None:
                    lb = gcelb.create_balancer(name, port_range, protocol,
                                               None, nodes, ex_region=region, ex_healthchecks=[hc],
                                               ex_address=external_ip)
                else:
                    lb = gcelb.create_balancer(name, port_range, protocol,
                                               None, nodes, ex_region=region, ex_address=external_ip)
                changed = True
            except ResourceExistsError:
                # Idempotent: fetch the balancer that already exists.
                lb = gcelb.get_balancer(name)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            if lb is not None:
                json_output['members'] = output_nodes
                json_output['protocol'] = protocol
                json_output['region'] = region
                json_output['external_ip'] = lb.ip
                json_output['port_range'] = lb.port
                hc_names = []
                if 'healthchecks' in lb.extra:
                    for hc in lb.extra['healthchecks']:
                        hc_names.append(hc.name)
                json_output['httphealthchecks'] = hc_names

    if state in ['absent', 'deleted']:
        # first, delete the load balancer (forwarding rule and target pool)
        # if specified.
        if name:
            json_output['name'] = name
            try:
                lb = gcelb.get_balancer(name)
                gcelb.destroy_balancer(lb)
                changed = True
            except ResourceNotFoundError:
                # Already gone; deletion is idempotent.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

        # destroy the health check if specified
        if httphealthcheck_name:
            json_output['httphealthcheck_name'] = httphealthcheck_name
            try:
                hc = gce.ex_get_healthcheck(httphealthcheck_name)
                gce.ex_destroy_healthcheck(hc)
                changed = True
            except ResourceNotFoundError:
                # Already gone; deletion is idempotent.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

    json_output['changed'] = changed
    module.exit_json(**json_output)
+
+
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py
new file mode 100644
index 00000000..fd47167f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+ - Resizing and Recreating VM are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+ - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
+ and policy.max_instances (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ named_ports:
+ type: list
+ description:
+ - Define named ports that backend services can forward data to. Format is a a list of
+ name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False returned if update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete attempted and succeeded.
+ False returned if delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+ :return True, exits otherwise
+ :rtype: ``bool``
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
+
def _validate_autoscaling_params(params):
    """
    Validate that the minimum configuration is present for autoscaling.

    :param params: Ansible dictionary containing autoscaling configuration
                   It is expected that autoscaling config will be found at
                   the key 'autoscaling'.
    :type params: ``dict``

    :return: Tuple containing a boolean and a string. True if autoscaler
             is valid, False otherwise, plus str for message.
    :rtype: ``(``bool``, ``str``)``
    """
    autoscaling = params['autoscaling']
    if not autoscaling:
        # Autoscaling is optional, so an unset value is valid.
        return (True, '')
    if not isinstance(autoscaling, dict):
        return (False,
                'autoscaling: configuration expected to be a dictionary.')

    # Top-level fields required on the autoscaling dictionary.
    top_level_fields = [
        {'name': 'name', 'required': True, 'type': str},
        {'name': 'enabled', 'required': True, 'type': bool},
        {'name': 'policy', 'required': True, 'type': dict},
    ]
    (ok, msg) = _check_params(autoscaling, top_level_fields)
    if not ok:
        return (False, msg)

    # Fields recognized inside the autoscaling policy.
    policy_fields = [
        {'name': 'max_instances', 'required': True, 'type': int},
        {'name': 'min_instances', 'required': False, 'type': int},
        {'name': 'cool_down_period', 'required': False, 'type': int},
    ]
    (ok, msg) = _check_params(autoscaling['policy'], policy_fields)
    if not ok:
        return (False, msg)

    # TODO(supertom): check utilization fields

    return (True, '')
+
+
def _validate_named_port_params(params):
    """
    Validate the named ports parameters

    :param params: Ansible dictionary containing named_ports configuration
                   It is expected that the config will be found at the key
                   'named_ports', holding a list of {name : port} dicts.
    :type params: ``dict``

    :return: Tuple containing a boolean and a string. True if params
             are valid, False otherwise, plus str for message.
    :rtype: ``(``bool``, ``str``)``
    """
    named_ports = params['named_ports']
    if not named_ports:
        # named_ports is optional, so an unset value is valid.
        return (True, '')
    if not isinstance(named_ports, list):
        return (False, 'named_ports: expected list of name:port dictionaries.')

    # Every entry must carry both a string name and an integer port.
    required_fields = [
        {'name': 'name', 'required': True, 'type': str},
        {'name': 'port', 'required': True, 'type': int},
    ]
    for entry in named_ports:
        (ok, msg) = _check_params(entry, required_fields)
        if not ok:
            return (False, msg)

    return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
+
def create_autoscaler(gce, mig, params):
    """
    Create a new Autoscaler for a MIG.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param mig: An initialized GCEInstanceGroupManager.
    :type mig: :class: `GCEInstanceGroupManager`

    :param params: Dictionary of autoscaling parameters.
    :type params: ``dict``

    :return: True if the Autoscaler was created, False otherwise.
    :rtype: ``bool``
    """
    policy = _gen_gce_as_policy(params['policy'])
    autoscaler = gce.ex_create_autoscaler(name=params['name'],
                                          zone=mig.zone,
                                          instance_group=mig,
                                          policy=policy)
    return bool(autoscaler)
+
+
def update_autoscaler(gce, autoscaler, params):
    """
    Update an Autoscaler.

    Takes an existing Autoscaler object, and updates it with
    the supplied params before calling libcloud's update method.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param autoscaler: An initialized GCEAutoscaler.
    :type autoscaler: :class: `GCEAutoscaler`

    :param params: Dictionary of autoscaling parameters.
    :type params: ``dict``

    :return: True if changes, False otherwise.
    :rtype: ``bool``
    """
    desired_policy = _gen_gce_as_policy(params['policy'])
    if autoscaler.policy == desired_policy:
        # Policy already matches; skip the API round trip.
        return False
    autoscaler.policy = desired_policy
    return bool(gce.ex_update_autoscaler(autoscaler))
+
+
def delete_autoscaler(autoscaler):
    """
    Delete an Autoscaler. Does not affect MIG.

    :param autoscaler: Autoscaler object from libcloud.
    :type autoscaler: :class: `GCEAutoscaler`

    :return: True if the Autoscaler was destroyed, False otherwise.
    :rtype: ``bool``
    """
    return True if autoscaler.destroy() else False
+
+
def get_autoscaler(gce, name, zone):
    """
    Get an Autoscaler from GCE.

    If the Autoscaler is not found, None is returned.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param name: Name of the Autoscaler.
    :type name: ``str``

    :param zone: Zone that the Autoscaler is located in.
    :type zone: ``str``

    :return: A GCEAutoscaler object or None.
    :rtype: :class: `GCEAutoscaler` or None
    """
    try:
        # Does the Autoscaler already exist?
        return gce.ex_get_autoscaler(name, zone)

    except ResourceNotFoundError:
        # Not found is an expected outcome here, not an error.
        return None
+
+
def create_mig(gce, params):
    """
    Create a new Managed Instance Group.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param params: Dictionary of parameters needed by the module.
    :type params: ``dict``

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    mig = gce.ex_create_instancegroupmanager(
        name=params['name'],
        size=params['size'],
        template=params['template'],
        zone=params['zone'])

    if not mig:
        return (False, [])

    # Freshly-created VMs report a 'CREATING' current action.
    return (True, _get_instance_list(mig, filter_list=['CREATING']))
+
+
def delete_mig(mig):
    """
    Delete a Managed Instance Group. All VMs in that MIG are also deleted.

    :param mig: Managed Instance Group Object from Libcloud.
    :type mig: :class: `GCEInstanceGroupManager`

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    # Snapshot the instance names before the group disappears; every
    # current action counts, since all VMs go away with the MIG.
    all_actions = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
                   'ABANDONING', 'RESTARTING', 'REFRESHING']
    doomed_instances = _get_instance_list(mig, filter_list=all_actions)

    if mig.destroy():
        return (True, doomed_instances)
    return (False, [])
+
+
def recreate_instances_in_mig(mig):
    """
    Recreate the instances for a Managed Instance Group.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig: :class: `GCEInstanceGroupManager`

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    if not mig.recreate_instances():
        return (False, [])
    # Instances being rebuilt report a 'RECREATING' current action.
    return (True, _get_instance_list(mig, filter_list=['RECREATING']))
+
+
def resize_mig(mig, size):
    """
    Resize a Managed Instance Group.

    Based on the size provided, GCE will automatically create and delete
    VMs as needed.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig: :class: `GCEInstanceGroupManager`

    :param size: Desired number of instances in the group.
    :type size: ``int``

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    if not mig.resize(size):
        return (False, [])
    # A resize either creates or deletes VMs depending on direction.
    return (True, _get_instance_list(mig,
                                     filter_list=['CREATING', 'DELETING']))
+
+
def get_mig(gce, name, zone):
    """
    Get a Managed Instance Group from GCE.

    If the MIG is not found, None is returned.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param name: Name of the Managed Instance Group.
    :type name: ``str``

    :param zone: Zone that the Managed Instance Group is located in.
    :type zone: ``str``

    :return: A GCEInstanceGroupManager object or None.
    :rtype: :class: `GCEInstanceGroupManager` or None
    """
    try:
        # Does the MIG already exist?
        return gce.ex_get_instancegroupmanager(name=name, zone=zone)

    except ResourceNotFoundError:
        # Not found is an expected outcome here, not an error.
        return None
+
+
def update_named_ports(mig, named_ports):
    """
    Set the named ports on a Managed Instance Group.

    Sort the existing named ports and new. If different, update.
    This also implicitly allows for the removal of named ports.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig: :class: `GCEInstanceGroupManager`

    :param named_ports: list of dictionaries in the format of {'name': port}
    :type named_ports: ``list`` of ``dict``

    :return: True if the named ports were updated, False otherwise.
    :rtype: ``bool``
    """
    current = []
    if hasattr(mig.instance_group, 'named_ports'):
        current = sorted(mig.instance_group.named_ports,
                         key=lambda entry: entry['name'])
    desired = []
    if named_ports is not None:
        desired = sorted(named_ports, key=lambda entry: entry['name'])

    if current == desired:
        # Already in the requested state; nothing to push.
        return False
    # Send the caller-supplied list exactly as given (unsorted).
    return True if mig.instance_group.set_named_ports(named_ports) else False
+
+
def main():
    """Create, update, or delete a GCE Managed Instance Group (MIG).

    Dispatches on ``state`` and on whether the named MIG already exists:
      * missing + absent  -> fail (cannot delete an unknown group)
      * missing + present -> create the MIG, then optionally an autoscaler
        and named ports
      * exists  + absent  -> delete the named autoscaler (if given), then
        the MIG
      * exists  + present -> reconcile template, size, autoscaler, and
        named ports against the requested values
    Never returns normally; exits via module.exit_json()/fail_json().
    """
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True),
        template=dict(),
        recreate_instances=dict(type='bool', default=False),
        # Do not set a default size here. For Create and some update
        # operations, it is required and should be explicitly set.
        # Below, we set it to the existing value if it has not been set.
        size=dict(type='int'),
        state=dict(choices=['absent', 'present'], default='present'),
        zone=dict(required=True),
        autoscaling=dict(type='dict', default=None),
        named_ports=dict(type='list', default=None),
        service_account_email=dict(),
        service_account_permissions=dict(type='list'),
        pem_file=dict(type='path'),
        credentials_file=dict(type='path'),
        project_id=dict(), ), )

    # Fail fast when runtime prerequisites are missing.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')

    gce = gce_connect(module)
    # Feature-detect MIG support on the connected driver (libcloud 1.2+).
    if not hasattr(gce, 'ex_create_instancegroupmanager'):
        module.fail_json(
            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
            changed=False)

    # Gather the parameters this module acts on into one dict so the
    # validation helpers can operate on a single structure.
    params = {}
    params['state'] = module.params.get('state')
    params['zone'] = module.params.get('zone')
    params['name'] = module.params.get('name')
    params['size'] = module.params.get('size')
    params['template'] = module.params.get('template')
    params['recreate_instances'] = module.params.get('recreate_instances')
    params['autoscaling'] = module.params.get('autoscaling', None)
    params['named_ports'] = module.params.get('named_ports', None)

    # Validate the autoscaling sub-options before touching GCE.
    (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
    if not valid_autoscaling:
        module.fail_json(msg=as_msg, changed=False)

    # named_ports needs libcloud 1.3.0+; detect by method presence.
    if params['named_ports'] is not None and not hasattr(
            gce, 'ex_instancegroup_set_named_ports'):
        module.fail_json(
            msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
            changed=False)

    (valid_named_ports, np_msg) = _validate_named_port_params(params)
    if not valid_named_ports:
        module.fail_json(msg=np_msg, changed=False)

    changed = False
    json_output = {'state': params['state'], 'zone': params['zone']}
    mig = get_mig(gce, params['name'], params['zone'])

    if not mig:
        if params['state'] == 'absent':
            # Doesn't exist in GCE, and state==absent.
            changed = False
            module.fail_json(
                msg="Cannot delete unknown managed instance group: %s" %
                (params['name']))
        else:
            # Create MIG
            req_create_fields = [
                {'name': 'template', 'required': True, 'type': str},
                {'name': 'size', 'required': True, 'type': int}
            ] # yapf: disable

            (valid_create_fields, valid_create_msg) = _check_params(
                params, req_create_fields)
            if not valid_create_fields:
                module.fail_json(msg=valid_create_msg, changed=False)

            (changed, json_output['created_instances']) = create_mig(gce,
                                                                     params)
            if params['autoscaling'] and params['autoscaling'][
                'enabled'] is True:
                # Fetch newly-created MIG and create Autoscaler for it.
                mig = get_mig(gce, params['name'], params['zone'])
                if not mig:
                    module.fail_json(
                        msg='Unable to fetch created MIG %s to create \
                        autoscaler in zone: %s' % (
                            params['name'], params['zone']), changed=False)

                if not create_autoscaler(gce, mig, params['autoscaling']):
                    module.fail_json(
                        msg='Unable to fetch MIG %s to create autoscaler \
                        in zone: %s' % (params['name'], params['zone']),
                        changed=False)

                json_output['created_autoscaler'] = True
            # Add named ports if available
            if params['named_ports']:
                mig = get_mig(gce, params['name'], params['zone'])
                if not mig:
                    module.fail_json(
                        msg='Unable to fetch created MIG %s to create \
                        autoscaler in zone: %s' % (
                            params['name'], params['zone']), changed=False)
                json_output['set_named_ports'] = update_named_ports(
                    mig, params['named_ports'])
                if json_output['set_named_ports']:
                    json_output['named_ports'] = params['named_ports']

    elif params['state'] == 'absent':
        # Delete MIG

        # First, check and remove the autoscaler, if present.
        # Note: multiple autoscalers can be associated to a single MIG. We
        # only handle the one that is named, but we might want to think about this.
        if params['autoscaling']:
            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
                                        params['zone'])
            if not autoscaler:
                module.fail_json(msg='Unable to fetch autoscaler %s to delete \
                in zone: %s' % (params['autoscaling']['name'], params['zone']),
                                changed=False)

            changed = delete_autoscaler(autoscaler)
            json_output['deleted_autoscaler'] = changed

        # Now, delete the MIG.
        (changed, json_output['deleted_instances']) = delete_mig(mig)

    else:
        # Update MIG

        # If we're going to update a MIG, we need a size and template values.
        # If not specified, we use the values from the existing MIG.
        if not params['size']:
            params['size'] = mig.size

        if not params['template']:
            params['template'] = mig.template.name

        if params['template'] != mig.template.name:
            # Update Instance Template.
            new_template = gce.ex_get_instancetemplate(params['template'])
            mig.set_instancetemplate(new_template)
            json_output['updated_instancetemplate'] = True
            changed = True
        if params['recreate_instances'] is True:
            # Recreate Instances.
            (changed, json_output['recreated_instances']
             ) = recreate_instances_in_mig(mig)

        if params['size'] != mig.size:
            # Resize MIG.
            keystr = 'created' if params['size'] > mig.size else 'deleted'
            (changed, json_output['resize_%s_instances' %
                                  (keystr)]) = resize_mig(mig, params['size'])

        # Update Autoscaler
        if params['autoscaling']:
            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
                                        params['zone'])
            if not autoscaler:
                # Try to create autoscaler.
                # Note: this isn't perfect, if the autoscaler name has changed
                # we wouldn't know that here.
                if not create_autoscaler(gce, mig, params['autoscaling']):
                    module.fail_json(
                        msg='Unable to create autoscaler %s for existing MIG %s\
                        in zone: %s' % (params['autoscaling']['name'],
                                        params['name'], params['zone']),
                        changed=False)
                json_output['created_autoscaler'] = True
                changed = True
            else:
                if params['autoscaling']['enabled'] is False:
                    # Delete autoscaler
                    changed = delete_autoscaler(autoscaler)
                    json_output['delete_autoscaler'] = changed
                else:
                    # Update policy, etc.
                    changed = update_autoscaler(gce, autoscaler,
                                                params['autoscaling'])
                    json_output['updated_autoscaler'] = changed
        named_ports = params['named_ports'] or []
        json_output['updated_named_ports'] = update_named_ports(mig,
                                                                named_ports)
        if json_output['updated_named_ports']:
            json_output['named_ports'] = named_ports

    # NOTE(review): 'changed' reflects only the LAST mutating step in the
    # update path (later assignments overwrite earlier ones) — presumably
    # intentional best-effort reporting; verify before relying on it.
    json_output['changed'] = changed
    json_output.update(params)
    module.exit_json(**json_output)
+
+
# Standard Ansible module entry point: run main() when executed as a script.
if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py
new file mode 100644
index 00000000..48395f2a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+ - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25))
+ this parameter is mandatory when creating or updating a firewall rule
+ ipv4_range:
+ type: str
+ description:
+ - the IPv4 address range in CIDR notation for the network
+        this parameter is not required when you specify an existing network in the I(name) parameter,
+        but it is required when you create a new network
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+ - network mode for Google Cloud
+ C(legacy) indicates a network with an IP address range;
+ C(auto) automatically generates subnetworks in different regions;
+ C(custom) uses networks to group subnets of user specified IP address ranges
+ https://cloud.google.com/compute/docs/networking#network_types
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.google.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.google.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.google.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.google.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.google.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.google.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+ description: Instance Tags firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+ description: Instance Tags with these tags receive traffic allowed by firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
def format_allowed_section(allowed):
    """Convert one 'proto[:ports]' section into a GCE 'allowed' dict.

    Examples:
      'tcp'        -> {'IPProtocol': 'tcp'}
      'tcp:80'     -> {'IPProtocol': 'tcp', 'ports': ['80']}
      'tcp:80,443' -> {'IPProtocol': 'tcp', 'ports': ['80', '443']}

    A section with more than one ':' is malformed and yields an empty
    list, matching the historical behaviour callers depend on.
    """
    colon_count = allowed.count(":")
    if colon_count > 1:
        return []
    if colon_count == 1:
        protocol, raw_ports = allowed.split(":")
        if "," in raw_ports:
            port_list = raw_ports.split(",")
        elif raw_ports:
            port_list = [raw_ports]
        else:
            port_list = []
    else:
        protocol = allowed
        port_list = []
    section = {"IPProtocol": protocol}
    if port_list:
        section["ports"] = port_list
    return section
+
+
def format_allowed(allowed):
    """Expand a semicolon-delimited allowed spec into GCE-compatible dicts.

    Each ';'-separated section (e.g. 'tcp:80-800;udp:1-25') is converted
    via format_allowed_section(); a spec without ';' produces a single-
    element list.
    """
    return [format_allowed_section(section)
            for section in allowed.split(";")]
+
+
def sorted_allowed_list(allowed_list):
    """Canonically order an 'allowed' list so two lists can be compared.

    Performs a stable two-pass sort: first by protocol name, then by the
    sorted ports list, so entries end up ordered primarily by ports with
    protocol as the tiebreaker — identical to the original ordering.
    """
    by_protocol = sorted(allowed_list, key=lambda entry: entry['IPProtocol'])
    return sorted(by_protocol,
                  key=lambda entry: sorted(entry.get('ports', [])))
+
+
def main():
    """Create or destroy GCE networks, subnetworks, and firewall rules.

    With ``state`` in ('active', 'present') the module ensures the network
    (legacy/auto/custom mode), an optional custom-mode subnetwork, and an
    optional firewall rule exist and match the requested attributes.  With
    ('absent', 'deleted') it removes the firewall rule, subnetwork, or
    network — whichever identifying parameter is supplied, in that order
    of precedence.  Exits via module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            allowed=dict(),
            ipv4_range=dict(),
            fwname=dict(),
            name=dict(),
            src_range=dict(default=[], type='list'),
            src_tags=dict(default=[], type='list'),
            target_tags=dict(default=[], type='list'),
            state=dict(default='present'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
            subnet_name=dict(),
            subnet_region=dict(),
            subnet_desc=dict(),
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')

    gce = gce_connect(module)

    allowed = module.params.get('allowed')
    ipv4_range = module.params.get('ipv4_range')
    fwname = module.params.get('fwname')
    name = module.params.get('name')
    src_range = module.params.get('src_range')
    src_tags = module.params.get('src_tags')
    target_tags = module.params.get('target_tags')
    state = module.params.get('state')
    mode = module.params.get('mode')
    subnet_name = module.params.get('subnet_name')
    subnet_region = module.params.get('subnet_region')
    subnet_desc = module.params.get('subnet_desc')

    changed = False
    json_output = {'state': state}

    if state in ['active', 'present']:
        network = None
        subnet = None
        # Look up the existing network (and, for custom mode, subnetwork).
        # ResourceNotFoundError just means we may need to create them below.
        try:
            network = gce.ex_get_network(name)
            json_output['name'] = name
            if mode == 'legacy':
                json_output['ipv4_range'] = network.cidr
            if network and mode == 'custom' and subnet_name:
                if not hasattr(gce, 'ex_get_subnetwork'):
                    module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)

                subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
                json_output['subnet_name'] = subnet_name
                json_output['ipv4_range'] = subnet.cidr
        except ResourceNotFoundError:
            pass
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)

        # user wants to create a new network that doesn't yet exist
        if name and not network:
            if not ipv4_range and mode != 'auto':
                module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
                                 changed=False)
            # Only legacy networks take the CIDR positionally; newer modes
            # pass 'mode' as a keyword (TypeError => libcloud too old).
            args = [ipv4_range if mode == 'legacy' else None]
            kwargs = {}
            if mode != 'legacy':
                kwargs['mode'] = mode

            try:
                network = gce.ex_create_network(name, *args, **kwargs)
                json_output['name'] = name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except TypeError:
                module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

        # Custom-mode networks additionally need a subnetwork created.
        if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
            if not hasattr(gce, 'ex_create_subnetwork'):
                module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
            if not subnet_name or not ipv4_range or not subnet_region:
                module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)

            try:
                subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
                json_output['subnet_name'] = subnet_name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=changed)

        if fwname:
            # user creating a firewall rule
            if not allowed and not src_range and not src_tags:
                if changed and network:
                    module.fail_json(
                        msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
                module.fail_json(
                    msg="Missing required firewall rule parameter(s)",
                    changed=False)

            allowed_list = format_allowed(allowed)

            # Fetch existing rule and if it exists, compare attributes
            # update if attributes changed. Create if doesn't exist.
            try:
                fw_changed = False
                fw = gce.ex_get_firewall(fwname)

                # If old and new attributes are different, we update the firewall rule.
                # This implicitly lets us clear out attributes as well.
                # allowed_list is required and must not be None for firewall rules.
                if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
                    fw.allowed = allowed_list
                    fw_changed = True

                # source_ranges might not be set in the project; cast it to an empty list
                fw.source_ranges = fw.source_ranges or []

                # If these attributes are lists, we sort them first, then compare.
                # Otherwise, we update if they differ.
                if fw.source_ranges != src_range:
                    if isinstance(src_range, list):
                        if sorted(fw.source_ranges) != sorted(src_range):
                            fw.source_ranges = src_range
                            fw_changed = True
                    else:
                        fw.source_ranges = src_range
                        fw_changed = True

                # source_tags might not be set in the project; cast it to an empty list
                fw.source_tags = fw.source_tags or []

                if fw.source_tags != src_tags:
                    if isinstance(src_tags, list):
                        if sorted(fw.source_tags) != sorted(src_tags):
                            fw.source_tags = src_tags
                            fw_changed = True
                    else:
                        fw.source_tags = src_tags
                        fw_changed = True

                # target_tags might not be set in the project; cast it to an empty list
                fw.target_tags = fw.target_tags or []

                if fw.target_tags != target_tags:
                    if isinstance(target_tags, list):
                        if sorted(fw.target_tags) != sorted(target_tags):
                            fw.target_tags = target_tags
                            fw_changed = True
                    else:
                        fw.target_tags = target_tags
                        fw_changed = True

                if fw_changed is True:
                    try:
                        gce.ex_update_firewall(fw)
                        changed = True
                    except Exception as e:
                        module.fail_json(msg=unexpected_error_msg(e), changed=False)

            # Firewall rule not found so we try to create it.
            except ResourceNotFoundError:
                try:
                    gce.ex_create_firewall(fwname, allowed_list, network=name,
                                           source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
                    changed = True

                except Exception as e:
                    module.fail_json(msg=unexpected_error_msg(e), changed=False)

            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            json_output['fwname'] = fwname
            json_output['allowed'] = allowed
            json_output['src_range'] = src_range
            json_output['src_tags'] = src_tags
            json_output['target_tags'] = target_tags

    if state in ['absent', 'deleted']:
        # Deletion handles exactly one resource per run, chosen by which
        # identifying parameter is set: firewall rule, then subnet, then network.
        if fwname:
            json_output['fwname'] = fwname
            fw = None
            try:
                fw = gce.ex_get_firewall(fwname)
            except ResourceNotFoundError:
                # Already gone; nothing to do.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        elif subnet_name:
            if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
                module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
            json_output['name'] = subnet_name
            subnet = None
            try:
                subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
            except ResourceNotFoundError:
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if subnet:
                gce.ex_destroy_subnetwork(subnet)
                changed = True
        elif name:
            json_output['name'] = name
            network = None
            try:
                network = gce.ex_get_network(name)

            except ResourceNotFoundError:
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if network:
                try:
                    gce.ex_destroy_network(network)
                except Exception as e:
                    module.fail_json(msg=unexpected_error_msg(e), changed=False)
                changed = True

    json_output['changed'] = changed
    module.exit_json(**json_output)
+
+
# Standard Ansible module entry point: run main() when executed as a script.
if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py
new file mode 100644
index 00000000..0e3093d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+ - If C(yes), deletes the volume when instance is terminated
+ type: bool
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+        Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
def main():
    """Create/attach or detach/destroy a GCE persistent disk.

    With ``state`` in ('active', 'present') the module ensures the disk
    exists (optionally sourced from an image, image family, or snapshot)
    and, when ``instance_name`` is given, that it is attached to that
    instance.  With ('absent', 'deleted') it detaches the disk from the
    instance and, unless ``detach_only``, destroys it.  Exits via
    module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            delete_on_termination=dict(type='bool'),
            detach_only=dict(type='bool'),
            instance_name=dict(),
            mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name=dict(required=True),
            size_gb=dict(default=10),
            disk_type=dict(default='pd-standard'),
            image=dict(),
            image_family=dict(),
            external_projects=dict(type='list'),
            snapshot=dict(),
            state=dict(default='present'),
            zone=dict(default='us-central1-b'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')

    gce = gce_connect(module)

    delete_on_termination = module.params.get('delete_on_termination')
    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')

    # Both options only make sense relative to a specific instance.
    if delete_on_termination and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when requesting delete on termination',
            changed=False)

    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)

    disk = inst = None
    changed = is_attached = False

    json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name

    if instance_name:
        # user wants to attach/detach from an existing instance
        try:
            inst = gce.ex_get_node(instance_name, zone)
            # is the disk attached?
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except Exception:
            # Best-effort lookup: a missing instance is only reported
            # later, when it is actually required for the operation.
            pass

    # find disk if it already exists
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        pass
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    # user wants a disk to exist. If "instance_name" is supplied the user
    # also wants it attached
    if state in ['active', 'present']:

        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        try:
            # Accept numeric strings/floats; round to a whole GB >= 1.
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise Exception
        except Exception:
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                             changed=False)

        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)

        if not disk:
            # image and snapshot are mutually exclusive disk sources.
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            # Source priority: image_family, then image, then snapshot.
            if image_family is not None:
                lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
            elif image is not None:
                lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                # Disk appeared between lookup and create; treat as success.
                pass
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                                 changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode,
                                  ex_auto_delete=delete_on_termination)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            if delete_on_termination:
                json_output['delete_on_termination'] = True
            changed = True

    # user wants to delete a disk (or perhaps just detach it).
    if state in ['absent', 'deleted'] and disk:

        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError as e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True

    json_output['changed'] = changed
    module.exit_json(**json_output)
+
+
# Standard Ansible module entry point: run main() when executed as a script.
if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py
new file mode 100644
index 00000000..6723f464
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+ - Manages snapshots for GCE instances. This module manages snapshots for
+ the storage volumes of a GCE compute instance. If there are multiple
+ volumes, each snapshot will be prepended with the disk name
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py
new file mode 100644
index 00000000..4af31863
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+    to/from GCE instances. Use 'instance_pattern' to update multiple instances in a specific zone.
+options:
+ instance_name:
+ type: str
+ description:
+ - The name of the GCE instance to add/remove tags.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+ - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+      - The zone of the instance(s) to add/remove tags in.
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+  - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.google.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.google.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.google.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+ # tags changed are any that in the new list that weren't in existing
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py
new file mode 100644
index 00000000..2d9230c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+ - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+        For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+ - Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the
+ provided subscription name. max_messages (int; default None; max number of messages to pull),
+ message_ack (bool; default False; acknowledge the message) and return_immediately
+          (bool; default False, don't wait for messages to appear). If the messages are acknowledged,
+ changed is set to True, otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+      - If subscription is specified we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+# (Message will be pushed; there is no check to see if the message was pushed before)
+- name: Create a topic and publish a message to it
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+# Setting absent will keep the messages from being sent
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.google.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+ mykey2: myvalu2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ ack_deadline: "60"
+ push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+### Topic will not be deleted
+- name: Delete subscription
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: absent
+
+# only pull keyword is required.
+- name: Pull messages from subscription
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+ batch.publish(bytes(msg), **attrs)
+ return True
+
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+ message_ack = pull_params.get('message_ack', 'no')
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py
new file mode 100644
index 00000000..1feac1e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+ - list state enables user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.google.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.google.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.google.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.google.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.google') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()