author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit    a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree      f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/google
parent    Initial commit. (diff)
download  ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
          ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip

Adding upstream version 2.10.7+merged+base+2.10.8+dfsg. (upstream/2.10.7+merged+base+2.10.8+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/google')
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/.github/workflows/ansible-test.yml  110
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/CHANGELOG.rst  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/FILES.json  418
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/LICENSE  674
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/MANIFEST.json  32
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/README.md  38
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/changelogs/changelog.yaml  17
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/changelogs/config.yaml  29
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/changelogs/fragments/.keep  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/meta/runtime.yml  9
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py  39
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py  799
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py  497
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py  247
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py  605
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py  350
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py  310
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py  904
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py  511
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py  293
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py  225
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py  218
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py  349
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py  164
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.ini  76
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.py  524
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.10.txt  22
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.11.txt  22
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.9.txt  11
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/compat/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/compat/mock.py  122
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/compat/unittest.py  38
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_auth.py  162
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_utils.py  361
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/test_gce_tag.py  66
-rw-r--r--  collections-debian-merged/ansible_collections/community/google/tests/unit/requirements.txt  1
41 files changed, 8695 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/google/.github/workflows/ansible-test.yml b/collections-debian-merged/ansible_collections/community/google/.github/workflows/ansible-test.yml
new file mode 100644
index 00000000..652b774a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/.github/workflows/ansible-test.yml
@@ -0,0 +1,110 @@
+# README FIRST
+# 1. replace "NAMESPACE" and "COLLECTION_NAME" with the correct name in the env section (e.g. with 'community' and 'mycollection')
+# 2. If you don't have unit tests, remove that section
+# 3. If your collection depends on other collections, ensure they are installed; see "Install collection dependencies"
+# If you need help, please ask in #ansible-devel on Freenode IRC
+
+name: CI
+on:
+ # Run CI against all pushes (direct commits, including merged PRs) and all pull requests
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 6 * * *'
+env:
+ NAMESPACE: community
+ COLLECTION_NAME: google
+
+jobs:
+
+###
+# Sanity tests (REQUIRED)
+#
+# https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html
+
+ sanity:
+ name: Sanity (Ⓐ${{ matrix.ansible }})
+ strategy:
+ matrix:
+ ansible:
+ # It's important that Sanity is tested against all stable-X.Y branches
+ # Testing against `devel` may fail as new tests are added.
+ # - stable-2.9 # Only if your collection supports Ansible 2.9
+ - stable-2.10
+ - devel
+ runs-on: ubuntu-latest
+ steps:
+
+ # ansible-test requires the collection to be in a directory in the form
+ # .../ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}/
+
+ - name: Check out code
+ uses: actions/checkout@v2
+ with:
+ path: ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ # It is sufficient to run this once, because "ansible-test sanity" in the Docker image
+ # will run against all Python versions it supports.
+ python-version: 3.8
+
+ # Install the head of the given branch (devel, stable-2.10)
+ - name: Install ansible-base (${{ matrix.ansible }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ # Run ansible-test sanity inside Docker.
+ # The Docker container has all the pinned dependencies that are required
+ # and all Python versions Ansible supports.
+ - name: Run sanity tests
+ run: ansible-test sanity --docker -v --color
+ working-directory: ./ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}
+
+###
+# Unit tests (OPTIONAL)
+#
+# https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html
+
+ units:
+ runs-on: ubuntu-latest
+ name: Units (Ⓐ${{ matrix.ansible }})
+ strategy:
+ # As soon as the first unit test fails, cancel the others to free up the CI queue
+ fail-fast: true
+ matrix:
+ ansible:
+ # - stable-2.9 # Only if your collection supports Ansible 2.9
+ - stable-2.10
+ - devel
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ with:
+ path: ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ # It is sufficient to run this once, because "ansible-test units" in the Docker image
+ # will run against all Python versions it supports.
+ python-version: 3.8
+
+ - name: Install ansible-base (${{ matrix.ansible }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ # Run the unit tests
+ - name: Run unit tests
+ run: ansible-test units -v --color --docker --coverage
+ working-directory: ./ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}
+
+ # ansible-test supports producing code coverage data
+ - name: Generate coverage report
+ run: ansible-test coverage xml -v --requirements --group-by command --group-by version
+ working-directory: ./ansible_collections/${{env.NAMESPACE}}/${{env.COLLECTION_NAME}}
+
+ # See the reports at https://codecov.io/gh/GITHUBORG/REPONAME
+ - uses: codecov/codecov-action@v1
+ with:
+ fail_ci_if_error: false
diff --git a/collections-debian-merged/ansible_collections/community/google/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/google/CHANGELOG.rst
new file mode 100644
index 00000000..8e33c3d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/CHANGELOG.rst
@@ -0,0 +1,23 @@
+==============================
+community.google Release Notes
+==============================
+
+.. contents:: Topics
+
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+This is the first stable release version of community.google.
+
+v0.1.0
+======
+
+Release Summary
+---------------
+
+This is the first pre-release version of community.google after migrating from community.general.
+
diff --git a/collections-debian-merged/ansible_collections/community/google/FILES.json b/collections-debian-merged/ansible_collections/community/google/FILES.json
new file mode 100644
index 00000000..ff7fa022
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/FILES.json
@@ -0,0 +1,418 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42d8f0ee470ee5760bc195e06fb35ba0d4cf1d2f0663ee1f866d997cb0159bdc",
+ "format": 1
+ },
+ {
+ "name": "scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/gce.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "925cfa8728fa00c71d851d13203fc6c9bfe87f96c0528029a0e909a602a0004c",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2fd208cca342e3e91d6711bd781bc2658715bf4201fa7d190be8d5ad35dc423",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8bc332492969db761f788580155f30f1f57382a5f952cf1a9c920b627843d6f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19432d0cf6054eff46a50aa1cd9300a3e6bfe1ea118a9906096312daf7ee58aa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f0279cfc003046e5f8e05851e12a8ef2b55d3770dcf9d585376afc5e6477b67",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gce_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f66f884d5d9a5f4ae7b34ece88d47071e2598a29915b5950e8c957986c80ed10",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "513d336e4fec3987f8bb1946f889e91126eae6e661e64359ecec85e69c00f23c",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9914221531e94c75983a9624dcaf8914fc459ba8d0e261ce1462135f300b5617",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "513d336e4fec3987f8bb1946f889e91126eae6e661e64359ecec85e69c00f23c",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a85bdf5808a57d1860fd128e8bf93be1ec8009022d0a0e8fdc1af3732c2e8a9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "282fc5e4c59582e8ea24ecb406ec7e5f14953a8d305002fbacda680811628d32",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31581014875f54e52e1ece32efe3b581b175c7bec2d939571d892daf2302219d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gc_storage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6390536b4af26888d58eea69390ed7590c637e83aa7fe278da7a776267517416",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_net.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7346ffb330fd4a4aca1937a1bdc9647e117c02e2dd701b72f1e46859107e0fb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcpubsub.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04636fac7225cdd6a5ca34ea471ad19749c14afc17ec6642c58b66f688d9205a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_img.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac082df31a9c7bc156c0698c4175e5f593bad90f634f27b60653542de122e12d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe08d5ac7faee9f7a8033b19ab4dccc5c76c46a4524c83a935dd89856bcd4c01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_instance_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "536e39871eba316c2264690c0ff26a2732b2be8f31215a03bd65d36dc7638480",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db83ddf7998f915e702851bfa122817a0d99f2e60c257f1899f639b2186faa49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_pd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d3cb555bf6b2095751eb4d5a984c9eab8bcbe6e6073c33749d15a80e976b3eb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc2585ec223fdbac121d7ba6e711e5bdbb574102d63779d17b429b7630fa3771",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_mig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dd037f1fd3a58af68217a4d213cde47dd835730e09fe840ab2b0aa4a6df6a2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_labels.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "419e6fc92f59717da395a4d70c778d18475940431517071e8c1ed051d3694bc4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcpubsub_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ef79767bdeee07d33aefe342ab4298f5d38c091ed5c58f6872d550d3b0f1f92",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54de796b84bf1308d370b1c015b0e968d48e0f821e7ae382a63b0f61ff4d87f2",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/gcp_storage_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "969de4dedeb2b6be2f3fe6c6072bdc4481f4b2e427e72eac03eb6c5359634a2b",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48a6a9e80e1629dbccaaa278f83493de9b19ca26a4334e7b114adc86f014332f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d18c3fb08d51d89b581031b5079624a86306457a85ba48938c27152e38881fe9",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b0634ab394e8ced5d60b2532bfa8ed14cec4c7d4739c9e0277c760c5543c0e1",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f5a70b361b5f1a356efddb0abde59de7d80197b7302859dd7155c7ad4e43b64",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57d072bb0337cf78a96fd4e2ee58f0e8d57e21da29bd9642adfbaed25556549c",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
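
Note: FILES.json is the collection's file manifest; every file entry pairs a relative path with a sha256 checksum, while directory entries carry null checksums. (The recurring value e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 is the sha256 of empty input, which is why it appears for each empty __init__.py and the .keep file.) A minimal verification sketch, assuming it runs from the root of an unpacked collection:

    # Recompute each file's sha256 and compare it with the manifest entry.
    import hashlib
    import json

    with open("FILES.json", "rb") as f:
        manifest = json.load(f)

    for entry in manifest["files"]:
        if entry["ftype"] != "file":
            continue  # directories have chksum_sha256: null
        with open(entry["name"], "rb") as fh:
            digest = hashlib.sha256(fh.read()).hexdigest()
        status = "OK" if digest == entry["chksum_sha256"] else "MISMATCH"
        print(status, entry["name"])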
diff --git a/collections-debian-merged/ansible_collections/community/google/LICENSE b/collections-debian-merged/ansible_collections/community/google/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/collections-debian-merged/ansible_collections/community/google/MANIFEST.json b/collections-debian-merged/ansible_collections/community/google/MANIFEST.json
new file mode 100644
index 00000000..aab8d078
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/MANIFEST.json
@@ -0,0 +1,32 @@
+{
+ "collection_info": {
+ "namespace": "community",
+ "name": "google",
+ "version": "1.0.0",
+ "authors": [
+ "Google"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "google",
+ "gce",
+ "gcp"
+ ],
+ "description": "Google Cloud Platform community collection",
+ "license": [],
+ "license_file": "LICENSE",
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/community.google",
+ "documentation": null,
+ "homepage": "https://github.com/ansible-collections/community.google",
+ "issues": "https://github.com/ansible-collections/community.google/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dfe9e97ce5637373663dd885af32c1deb5c10352c3440296526b3b4fe5d401c",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
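The file_manifest_file entry above pins FILES.json by its SHA-256 digest. As a rough illustration of how that checksum could be verified against an installed copy of the collection, here is a minimal Python sketch (the relative path is an assumption; adjust it to the installed collection root):

    import hashlib

    def sha256_of(path):
        # Hash the file in chunks so large manifests are not read into memory at once.
        digest = hashlib.sha256()
        with open(path, 'rb') as handle:
            for chunk in iter(lambda: handle.read(65536), b''):
                digest.update(chunk)
        return digest.hexdigest()

    # Should match the chksum_sha256 value recorded in MANIFEST.json.
    print(sha256_of('FILES.json'))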
diff --git a/collections-debian-merged/ansible_collections/community/google/README.md b/collections-debian-merged/ansible_collections/community/google/README.md
new file mode 100644
index 00000000..27122af0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/README.md
@@ -0,0 +1,38 @@
+# community.google Ansible collection
+[![CI](https://github.com/ansible-collections/community.google/workflows/CI/badge.svg?event=push)](https://github.com/ansible-collections/community.google/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.google)](https://codecov.io/gh/ansible-collections/community.google)
+
+## External requirements
+
+- boto
+- libcloud
+- google-auth
+
+## Included content
+
+See: https://docs.ansible.com/ansible/latest/collections/community/google/
+
+## Using this collection
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.google/tree/main/CHANGELOG.rst).
+
+## More information
+
+<!-- List out where the user can find additional information, such as working group meeting times, slack/IRC channels, or documentation for the product this collection automates. At a minimum, link to: -->
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
+- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)
+
+## Licensing
+
+GNU General Public License v3.0 or later.
+
+See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) for the full text.
diff --git a/collections-debian-merged/ansible_collections/community/google/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/google/changelogs/changelog.yaml
new file mode 100644
index 00000000..13fbdc44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/changelogs/changelog.yaml
@@ -0,0 +1,17 @@
+ancestor: null
+releases:
+ 0.1.0:
+ changes:
+ release_summary: 'This is the first pre-release version of community.google
+ after migrating from community.general.
+
+ '
+ fragments:
+ - 0.1.0.yml
+ release_date: '2020-11-30'
+ 1.0.0:
+ changes:
+ release_summary: This is the first stable release version of community.google.
+ fragments:
+ - 1.0.0.yaml
+ release_date: '2020-12-22'
diff --git a/collections-debian-merged/ansible_collections/community/google/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/google/changelogs/config.yaml
new file mode 100644
index 00000000..038e5eb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: community.google
+trivial_section_name: trivial
diff --git a/collections-debian-merged/ansible_collections/community/google/changelogs/fragments/.keep b/collections-debian-merged/ansible_collections/community/google/changelogs/fragments/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/changelogs/fragments/.keep
diff --git a/collections-debian-merged/ansible_collections/community/google/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/google/meta/runtime.yml
new file mode 100644
index 00000000..a9d40b8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/meta/runtime.yml
@@ -0,0 +1,9 @@
+---
+requires_ansible: '>=2.9.10'
+plugin_routing:
+ modules:
+ gcpubsub_facts:
+ redirect: community.google.gcpubsub_info
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: Use community.google.gcpubsub_info instead.
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py b/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py
new file mode 100644
index 00000000..06872543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/doc_fragments/_gcp.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # GCP doc fragment.
+ DOCUMENTATION = r'''
+options:
+ project:
+ description:
+ - The Google Cloud Platform project to use.
+ type: str
+ auth_kind:
+ description:
+ - The type of credential used.
+ type: str
+ required: true
+ choices: [ application, machineaccount, serviceaccount ]
+ service_account_contents:
+ description:
+ - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
+ type: jsonarg
+ service_account_file:
+ description:
+ - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path
+ service_account_email:
+ description:
+ - An optional service account email address if machineaccount is selected
+ and the user does not wish to use the default email.
+ type: str
+ scopes:
+ description:
+ - Array of scopes to be used.
+ type: list
+ elements: str
+ env_type:
+ description:
+ - Specifies which Ansible environment you're running this module within.
+ - This should not be set unless you know what you're doing.
+ - This only alters the User Agent string for any API requests.
+ type: str
+notes:
+  - For authentication, you can set service_account_file using the
+    C(GCP_SERVICE_ACCOUNT_FILE) env variable.
+  - For authentication, you can set service_account_contents using the
+    C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
+ - For authentication, you can set service_account_email using the
+ C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
+ - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
+ variable.
+ - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+  - Environment variable values will only be used if the playbook values are
+ not set.
+ - The I(service_account_email) and I(service_account_file) options are
+ mutually exclusive.
+'''
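The notes above describe an environment-variable fallback for the authentication options. A minimal sketch of that precedence rule (the playbook-supplied value wins, the env variable is only a fallback); the function and variable names here are illustrative, not part of the collection:

    import os

    def resolve_option(params, name, env_var):
        # Playbook-supplied values win; the environment variable is only a fallback.
        value = params.get(name)
        if value is None:
            value = os.environ.get(env_var)
        return value

    params = {'auth_kind': 'serviceaccount'}  # simulated module params
    service_account_file = resolve_option(params, 'service_account_file',
                                          'GCP_SERVICE_ACCOUNT_FILE')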
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py b/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py
new file mode 100644
index 00000000..ae58c5c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/lookup/gcp_storage_file.py
@@ -0,0 +1,156 @@
+# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: gcp_storage_file
+description:
+  - This lookup returns the contents of a file stored in Google Cloud Storage.
+short_description: Return GC Storage content
+author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
+requirements:
+ - python >= 2.6
+ - requests >= 2.18.4
+ - google-auth >= 1.3.0
+options:
+ src:
+ description:
+ - Source location of file (may be local machine or cloud depending on action).
+ required: false
+ bucket:
+ description:
+ - The name of the bucket.
+ required: false
+extends_documentation_fragment:
+- community.google._gcp
+
+'''
+
+EXAMPLES = '''
+- ansible.builtin.debug:
+ msg: |
+ the value of foo.txt is {{ lookup('community.google.gcp_storage_file',
+ bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
+ auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
+'''
+
+RETURN = '''
+_raw:
+ description:
+ - base64 encoded file content
+ type: list
+ elements: str
+'''
+
+import base64
+import json
+import mimetypes
+import os
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
+ HAS_GOOGLE_CLOUD_COLLECTION = True
+except ImportError:
+ HAS_GOOGLE_CLOUD_COLLECTION = False
+
+
+display = Display()
+
+
+class GcpMockModule(object):
+ def __init__(self, params):
+ self.params = params
+
+ def fail_json(self, *args, **kwargs):
+ raise AnsibleError(kwargs['msg'])
+
+ def raise_for_status(self, response):
+ try:
+ response.raise_for_status()
+ except getattr(requests.exceptions, 'RequestException'):
+ self.fail_json(msg="GCP returned error: %s" % response.json())
+
+
+class GcpFileLookup():
+ def get_file_contents(self, module):
+ auth = GcpSession(module, 'storage')
+ data = auth.get(self.media_link(module))
+ return base64.b64encode(data.content.rstrip())
+
+ def fetch_resource(self, module, link, allow_not_found=True):
+ auth = GcpSession(module, 'storage')
+ return self.return_if_object(module, auth.get(link), allow_not_found)
+
+ def self_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
+
+ def media_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
+
+ def return_if_object(self, module, response, allow_not_found=False):
+ # If not found, return nothing.
+ if allow_not_found and response.status_code == 404:
+ return None
+ # If no content, return nothing.
+ if response.status_code == 204:
+ return None
+ try:
+ module.raise_for_status(response)
+ result = response.json()
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+ raise AnsibleError("Invalid JSON response with error: %s" % inst)
+ if navigate_hash(result, ['error', 'errors']):
+ raise AnsibleError(navigate_hash(result, ['error', 'errors']))
+ return result
+
+ def object_headers(self, module):
+ return {
+ "name": module.params['src'],
+ "Content-Type": mimetypes.guess_type(module.params['src'])[0],
+ "Content-Length": str(os.path.getsize(module.params['src'])),
+ }
+
+ def run(self, terms, variables=None, **kwargs):
+ params = {
+ 'bucket': kwargs.get('bucket', None),
+ 'src': kwargs.get('src', None),
+ 'projects': kwargs.get('projects', None),
+ 'scopes': kwargs.get('scopes', None),
+ 'zones': kwargs.get('zones', None),
+ 'auth_kind': kwargs.get('auth_kind', None),
+ 'service_account_file': kwargs.get('service_account_file', None),
+ 'service_account_email': kwargs.get('service_account_email', None),
+ }
+
+ if not params['scopes']:
+ params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
+
+ fake_module = GcpMockModule(params)
+
+        # Check that the remote object exists before fetching its contents.
+ remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
+ if not remote_object:
+ raise AnsibleError("File does not exist in bucket")
+
+ result = self.get_file_contents(fake_module)
+ return [result]
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_GOOGLE_CLOUD_COLLECTION:
+ raise AnsibleError("community.google.gcp_storage_file needs a supported version of the google.cloud collection installed")
+ if not HAS_REQUESTS:
+ raise AnsibleError("community.google.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
+ return GcpFileLookup().run(terms, variables=variables, **kwargs)
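As the RETURN block and get_file_contents above indicate, the lookup yields the file content base64-encoded, with trailing whitespace stripped before encoding. A small standalone sketch of that round trip on the consumer side:

    import base64

    # What the lookup produces for a file containing b'hello world\n'
    # (content is rstrip()-ed before encoding, per get_file_contents).
    encoded = base64.b64encode(b'hello world\n'.rstrip())

    # Decoding on the consumer side recovers the original bytes.
    assert base64.b64decode(encoded) == b'hello world'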
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py
new file mode 100644
index 00000000..7698e3c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gce.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.google.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.google.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gce"
+USER_AGENT_VERSION = "v1"
+
+
+def gce_connect(module, provider=None):
+ """Return a GCP connection for Google Compute Engine."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+ provider = provider or Provider.GCE
+
+ return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return gcp_error(error)
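A minimal sketch of how gce_connect is typically called from a module. This is illustrative only: it assumes apache-libcloud is installed and that the module declares the usual zone/project_id/credentials_file/service_account_email parameters consumed by gcp_connect; it is not a shipped module of this collection.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.google.plugins.module_utils.gce import gce_connect

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                zone=dict(type='str', default='us-central1-a'),
                project_id=dict(type='str'),
                credentials_file=dict(type='path'),
                service_account_email=dict(type='str'),
                pem_file=dict(type='path'),
            ),
        )
        # Returns a libcloud GCE driver with the Ansible user agent appended.
        gce = gce_connect(module)
        module.exit_json(changed=False, nodes=[n.name for n in gce.list_nodes()])

    if __name__ == '__main__':
        main()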
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py
new file mode 100644
index 00000000..a034f3b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/module_utils/gcp.py
@@ -0,0 +1,799 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+# libcloud
+try:
+ import libcloud
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+# google-auth
+try:
+ import google.auth
+ from google.oauth2 import service_account
+ HAS_GOOGLE_AUTH = True
+except ImportError:
+ HAS_GOOGLE_AUTH = False
+
+# google-python-api
+try:
+ import google_auth_httplib2
+ from httplib2 import Http
+ from googleapiclient.http import set_user_agent
+ from googleapiclient.errors import HttpError
+ from apiclient.discovery import build
+ HAS_GOOGLE_API_LIB = True
+except ImportError:
+ HAS_GOOGLE_API_LIB = False
+
+
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
+
+
+def _get_gcp_ansible_credentials(module):
+ """Helper to fetch creds from AnsibleModule object."""
+ service_account_email = module.params.get('service_account_email', None)
+ # Note: pem_file is discouraged and will be deprecated
+ credentials_file = module.params.get('pem_file', None) or module.params.get(
+ 'credentials_file', None)
+ project_id = module.params.get('project_id', None)
+
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_environ_var(var_name, default_value):
+ """Wrapper around os.environ.get call."""
+ return os.environ.get(
+ var_name, default_value)
+
+
+def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
+ """Helper to look in environment variables for credentials."""
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ if not service_account_email:
+ service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
+ if not credentials_file:
+ credentials_file = _get_gcp_environ_var(
+ 'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
+ 'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
+ 'GCE_PEM_FILE_PATH', None)
+ if not project_id:
+ project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
+ 'GOOGLE_CLOUD_PROJECT', None)
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
+ """
+ Obtain GCP credentials by trying various methods.
+
+ There are 3 ways to specify GCP credentials:
+ 1. Specify via Ansible module parameters (recommended).
+ 2. Specify via environment variables. Two sets of env vars are available:
+       a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
+       b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; required
+          if using a p12 key)
+ 3. Specify via libcloud secrets.py file (deprecated).
+
+ There are 3 helper functions to assist in the above.
+
+ Regardless of method, the user also has the option of specifying a JSON
+ file or a p12 file as the credentials file. JSON is strongly recommended and
+ p12 will be removed in the future.
+
+ Additionally, flags may be set to require valid json and check the libcloud
+ version.
+
+ AnsibleModule.fail_json is called only if the project_id cannot be found.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param require_valid_json: If true, require credentials to be valid JSON. Default is True.
+ :type require_valid_json: ``bool``
+
+    :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :return: {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
+ :rtype: ``dict``
+ """
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_ansible_credentials(module)
+
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_environment_credentials(service_account_email,
+ credentials_file, project_id)
+
+ if credentials_file is None or project_id is None or service_account_email is None:
+ if check_libcloud is True:
+ if project_id is None:
+ # TODO(supertom): this message is legacy and integration tests
+ # depend on it.
+ module.fail_json(msg='Missing GCE connection parameters in libcloud '
+ 'secrets file.')
+ else:
+ if project_id is None:
+ module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
+ 'credentials file (%s)' % (project_id, credentials_file)))
+ # Set these fields to empty strings if they are None
+ # consumers of this will make the distinction between an empty string
+ # and None.
+ if credentials_file is None:
+ credentials_file = ''
+ if service_account_email is None:
+ service_account_email = ''
+
+ # ensure the credentials file is found and is in the proper format.
+ if credentials_file:
+ _validate_credentials_file(module, credentials_file,
+ require_valid_json=require_valid_json,
+ check_libcloud=check_libcloud)
+
+ return {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
+
+
+def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
+ """
+ Check for valid credentials file.
+
+ Optionally check for JSON format and if libcloud supports JSON.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param credentials_file: path to file on disk
+ :type credentials_file: ``str``. Complete path to file on disk.
+
+ :param require_valid_json: This argument is ignored as of Ansible 2.7.
+ :type require_valid_json: ``bool``
+
+    :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :returns: True
+ :rtype: ``bool``
+ """
+ try:
+ # Try to read credentials as JSON
+ with open(credentials_file) as credentials:
+ json.loads(credentials.read())
+ # If the credentials are proper JSON and we do not have the minimum
+ # required libcloud version, bail out and return a descriptive
+ # error
+ if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
+ module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
+ 'Upgrade to libcloud>=0.17.0.')
+ return True
+ except IOError as e:
+ module.fail_json(msg='GCP Credentials File %s not found.' %
+ credentials_file, changed=False)
+ return False
+ except ValueError as e:
+ module.fail_json(
+ msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
+ changed=False)
+
+
+def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
+ """Return a Google libcloud driver connection."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+
+ creds = _get_gcp_credentials(module,
+ require_valid_json=False,
+ check_libcloud=True)
+ try:
+ gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
+ datacenter=module.params.get('zone', None),
+ project=creds['project_id'])
+ gcp.connection.user_agent_append("%s/%s" % (
+ user_agent_product, user_agent_version))
+ except (RuntimeError, ValueError) as e:
+ module.fail_json(msg=str(e), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ return gcp
+
+
+def get_google_cloud_credentials(module, scopes=None):
+ """
+ Get credentials object for use with Google Cloud client.
+
+ Attempts to obtain credentials by calling _get_gcp_credentials. If those are
+ not present will attempt to connect via Application Default Credentials.
+
+ To connect via libcloud, don't use this function, use gcp_connect instead. For
+ Google Python API Client, see get_google_api_auth for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google Cloud example:
+      creds, params = get_google_cloud_credentials(module, scopes)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+    :type scopes: ``list`` of URIs
+
+ :returns: A tuple containing (google authorized) credentials object and
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_AUTH:
+ module.fail_json(msg='Please install google-auth.')
+
+ conn_params = _get_gcp_credentials(module,
+ require_valid_json=True,
+ check_libcloud=False)
+ try:
+ if conn_params['credentials_file']:
+ credentials = service_account.Credentials.from_service_account_file(
+ conn_params['credentials_file'])
+ if scopes:
+ credentials = credentials.with_scopes(scopes)
+ else:
+ (credentials, project_id) = google.auth.default(
+ scopes=scopes)
+ if project_id is not None:
+ conn_params['project_id'] = project_id
+
+ return (credentials, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
+ """
+ Authentication for use with google-python-api-client.
+
+ Function calls get_google_cloud_credentials, which attempts to assemble the credentials
+ from various locations. Next it attempts to authenticate with Google.
+
+ This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
+
+ For libcloud, don't use this function, use gcp_connect instead. For Google Cloud, See
+ get_google_cloud_credentials for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google API example:
+ http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
+ service = build('myservice', 'v1', http=http_auth)
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+ :type scopes: ``list`` of URIs
+
+ :param user_agent_product: User agent product. eg: 'ansible-python-api'
+ :type user_agent_product: ``str``
+
+ :param user_agent_version: Version string to append to product. eg: 'NA' or '0.1'
+ :type user_agent_version: ``str``
+
+ :returns: A tuple containing (google authorized) httplib2 request object and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_API_LIB:
+ module.fail_json(msg="Please install google-api-python-client library")
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+ try:
+ (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
+ http = set_user_agent(Http(), '%s-%s' %
+ (user_agent_product, user_agent_version))
+ http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
+
+ return (http_auth, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_client(module, service, user_agent_product, user_agent_version,
+ scopes=None, api_version='v1'):
+ """
+ Get the discovery-based python client. Use when a cloud client is not available.
+
+ client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ :returns: A tuple containing the authorized client to the specified service and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+
+ http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
+ user_agent_product=user_agent_product,
+ user_agent_version=user_agent_version)
+ client = build(service, api_version, http=http_auth)
+
+ return (client, conn_params)
+
+
+def check_min_pkg_version(pkg_name, minimum_version):
+    """Return True if the installed version of pkg_name is at least minimum_version."""
+ from pkg_resources import get_distribution
+ try:
+ installed_version = get_distribution(pkg_name).version
+ return LooseVersion(installed_version) >= minimum_version
+ except Exception as e:
+ return False
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
+
+
+def get_valid_location(module, driver, location, location_type='zone'):
+    if location_type == 'zone':
+        loc = driver.ex_get_zone(location)
+    else:
+        loc = driver.ex_get_region(location)
+    if loc is None:
+        link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
+        module.fail_json(msg=('%s %s is invalid. Please see the list of '
+                              'available %s at %s' % (
+                                  location_type, location, location_type, link)),
+                         changed=False)
+    return loc
+
+
+def check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+        [{'name': str, 'required': True/False, 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if 'required' in d and d['required'] is True:
+ raise ValueError(("%s is required and must be of type: %s" %
+ (d['name'], str(d['type']))))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
+ d['name'], str(d['type']), params[d['name']],
+ type(params[d['name']]))))
+ if 'values' in d:
+ if params[d['name']] not in d['values']:
+ raise ValueError(("%s must be one of: %s" % (
+ d['name'], ','.join(d['values']))))
+ if isinstance(params[d['name']], int):
+ if 'min' in d:
+ if params[d['name']] < d['min']:
+ raise ValueError(("%s must be greater than or equal to: %s" % (
+ d['name'], d['min'])))
+ if 'max' in d:
+ if params[d['name']] > d['max']:
+ raise ValueError("%s must be less than or equal to: %s" % (
+ d['name'], d['max']))
+ return True
+
+
+class GCPUtils(object):
+ """
+ Helper utilities for GCP.
+ """
+
+ @staticmethod
+ def underscore_to_camel(txt):
+ return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
+
+ @staticmethod
+ def remove_non_gcp_params(params):
+ """
+ Remove params if found.
+ """
+ params_to_remove = ['state']
+ for p in params_to_remove:
+ if p in params:
+ del params[p]
+
+ return params
+
+ @staticmethod
+ def params_to_gcp_dict(params, resource_name=None):
+ """
+ Recursively convert ansible params to GCP Params.
+
+ Keys are converted from snake to camelCase
+ ex: default_service to defaultService
+
+ Handles lists, dicts and strings
+
+ special provision for the resource name
+ """
+ if not isinstance(params, dict):
+ return params
+ gcp_dict = {}
+ params = GCPUtils.remove_non_gcp_params(params)
+ for k, v in params.items():
+ gcp_key = GCPUtils.underscore_to_camel(k)
+ if isinstance(v, dict):
+ retval = GCPUtils.params_to_gcp_dict(v)
+ gcp_dict[gcp_key] = retval
+ elif isinstance(v, list):
+ gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
+ else:
+ if resource_name and k == resource_name:
+ gcp_dict['name'] = v
+ else:
+ gcp_dict[gcp_key] = v
+ return gcp_dict
+
+ @staticmethod
+ def execute_api_client_req(req, client=None, raw=True,
+ operation_timeout=180, poll_interval=5,
+ raise_404=True):
+ """
+ General python api client interaction function.
+
+ For use with google-api-python-client, or clients created
+ with get_google_api_client function
+ Not for use with Google Cloud client libraries
+
+ For long-running operations, we make an immediate query and then
+ sleep poll_interval before re-querying. After the request is done
+ we rebuild the request with a get method and return the result.
+
+ """
+ try:
+ resp = req.execute()
+
+ if not resp:
+ return None
+
+ if raw:
+ return resp
+
+ if resp['kind'] == 'compute#operation':
+ resp = GCPUtils.execute_api_client_operation_req(req, resp,
+ client,
+ operation_timeout,
+ poll_interval)
+
+ if 'items' in resp:
+ return resp['items']
+
+ return resp
+ except HttpError as h:
+ # Note: 404s can be generated (incorrectly) for dependent
+ # resources not existing. We let the caller determine if
+ # they want 404s raised for their invocation.
+ if h.resp.status == 404 and not raise_404:
+ return None
+ else:
+ raise
+ except Exception:
+ raise
+
+ @staticmethod
+ def execute_api_client_operation_req(orig_req, op_resp, client,
+ operation_timeout=180, poll_interval=5):
+ """
+ Poll an operation for a result.
+ """
+ parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
+ project_id = parsed_url['project']
+ resource_name = GCPUtils.get_gcp_resource_from_methodId(
+ orig_req.methodId)
+ resource = GCPUtils.build_resource_from_name(client, resource_name)
+
+ start_time = time.time()
+
+ complete = False
+ attempts = 1
+ while not complete:
+ if start_time + operation_timeout >= time.time():
+ op_req = client.globalOperations().get(
+ project=project_id, operation=op_resp['name'])
+ op_resp = op_req.execute()
+ if op_resp['status'] != 'DONE':
+ time.sleep(poll_interval)
+ attempts += 1
+ else:
+ complete = True
+ if op_resp['operationType'] == 'delete':
+ # don't wait for the delete
+ return True
+ elif op_resp['operationType'] in ['insert', 'update', 'patch']:
+ # TODO(supertom): Isolate 'build-new-request' stuff.
+ resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
+ resource_name)
+ if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
+ parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
+ 'entity_name']
+ args = {'project': project_id,
+ resource_name_singular: parsed_url['entity_name']}
+ new_req = resource.get(**args)
+ resp = new_req.execute()
+ return resp
+ else:
+ # assuming multiple entities, do a list call.
+ new_req = resource.list(project=project_id)
+ resp = new_req.execute()
+ return resp
+ else:
+ # operation didn't complete on time.
+ raise GCPOperationTimeoutError("Operation timed out: %s" % (
+ op_resp['targetLink']))
+
+ @staticmethod
+ def build_resource_from_name(client, resource_name):
+ try:
+ method = getattr(client, resource_name)
+ return method()
+ except AttributeError:
+ raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
+ client))
+
+ @staticmethod
+ def get_gcp_resource_from_methodId(methodId):
+ try:
+ parts = methodId.split('.')
+ if len(parts) != 3:
+ return None
+ else:
+ return parts[1]
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def get_entity_name_from_resource_name(resource_name):
+ if not resource_name:
+ return None
+
+ try:
+ # Chop off global or region prefixes
+ if resource_name.startswith('global'):
+ resource_name = resource_name.replace('global', '')
+ elif resource_name.startswith('regional'):
+ resource_name = resource_name.replace('region', '')
+
+ # ensure we have a lower case first letter
+ resource_name = resource_name[0].lower() + resource_name[1:]
+
+ if resource_name[-3:] == 'ies':
+ return resource_name.replace(
+ resource_name[-3:], 'y')
+ if resource_name[-1] == 's':
+ return resource_name[:-1]
+
+ return resource_name
+
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def parse_gcp_url(url):
+ """
+ Parse GCP urls and return dict of parts.
+
+ Supported URL structures:
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
+
+ :param url: GCP-generated URL, such as a selflink or resource location.
+ :type url: ``str``
+
+    :return: dictionary of parts. Includes standard components of urlparse, plus
+ GCP-specific 'service', 'api_version', 'project' and
+ 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
+ and 'method_name', if applicable.
+ :rtype: ``dict``
+ """
+
+ p = urlparse.urlparse(url)
+ if not p:
+ return None
+ else:
+ # we add extra items such as
+ # zone, region and resource_name
+ url_parts = {}
+ url_parts['scheme'] = p.scheme
+ url_parts['host'] = p.netloc
+ url_parts['path'] = p.path
+ if p.path.find('/') == 0:
+ url_parts['path'] = p.path[1:]
+ url_parts['params'] = p.params
+ url_parts['fragment'] = p.fragment
+ url_parts['query'] = p.query
+ url_parts['project'] = None
+ url_parts['service'] = None
+ url_parts['api_version'] = None
+
+ path_parts = url_parts['path'].split('/')
+ url_parts['service'] = path_parts[0]
+ url_parts['api_version'] = path_parts[1]
+ if path_parts[2] == 'projects':
+ url_parts['project'] = path_parts[3]
+ else:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ if 'global' in path_parts:
+ url_parts['global'] = True
+ idx = path_parts.index('global')
+ if len(path_parts) - idx == 4:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+ url_parts['method_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx == 2:
+ url_parts['resource_name'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx < 2:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ elif 'regions' in path_parts or 'zones' in path_parts:
+ idx = -1
+ if 'regions' in path_parts:
+ idx = path_parts.index('regions')
+ url_parts['region'] = path_parts[idx + 1]
+ else:
+ idx = path_parts.index('zones')
+ url_parts['zone'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ else:
+ # no location in URL.
+ idx = path_parts.index('projects')
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ return url_parts
+
+ @staticmethod
+ def build_googleapi_url(project, api_version='v1', service='compute'):
+ return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
+
+ @staticmethod
+ def filter_gcp_fields(params, excluded_fields=None):
+ new_params = {}
+ if not excluded_fields:
+ excluded_fields = ['creationTimestamp', 'id', 'kind',
+ 'selfLink', 'fingerprint', 'description']
+
+ if isinstance(params, list):
+ new_params = [GCPUtils.filter_gcp_fields(
+ x, excluded_fields) for x in params]
+ elif isinstance(params, dict):
+ for k in params.keys():
+ if k not in excluded_fields:
+ new_params[k] = GCPUtils.filter_gcp_fields(
+ params[k], excluded_fields)
+ else:
+ new_params = params
+
+ return new_params
+
+ @staticmethod
+ def are_params_equal(p1, p2):
+ """
+ Check if two params dicts are equal.
+ TODO(supertom): need a way to filter out URLs, or they need to be built
+ """
+ filtered_p1 = GCPUtils.filter_gcp_fields(p1)
+ filtered_p2 = GCPUtils.filter_gcp_fields(p2)
+ if filtered_p1 != filtered_p2:
+ return False
+ return True
+
+
+class GCPError(Exception):
+ pass
+
+
+class GCPOperationTimeoutError(GCPError):
+ pass
+
+
+class GCPInvalidURLError(GCPError):
+ pass
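Two of the helpers above are easiest to grasp from concrete inputs. A small illustration of GCPUtils.params_to_gcp_dict (snake_case keys become camelCase, the resource-name key is mapped to 'name', and 'state' is stripped) and GCPUtils.parse_gcp_url; the project, zone and resource values are made up, and the import assumes the collection is installed where Python can find it:

    from ansible_collections.community.google.plugins.module_utils.gcp import GCPUtils

    params = {'state': 'present', 'health_check': 'my-check', 'port_name': 'http'}
    print(GCPUtils.params_to_gcp_dict(params, resource_name='health_check'))
    # -> {'name': 'my-check', 'portName': 'http'}

    url = ('https://www.googleapis.com/compute/v1/projects/my-project'
           '/zones/us-central1-a/instances/my-vm')
    parts = GCPUtils.parse_gcp_url(url)
    print(parts['project'], parts['zone'], parts['resource_name'], parts['entity_name'])
    # -> my-project us-central1-a instances my-vm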
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py
new file mode 100644
index 00000000..8344c251
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: Manage objects/buckets in Google Cloud Storage
+description:
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+      - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+      - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+        'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+      - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only
+        available when public-read is the acl for the object.
+ aliases: [expiry]
+ mode:
+ type: str
+      - Switches the module behaviour between upload, download, get_url (return download URL), get_str (download object as string), create (bucket)
+        and delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use else where in your playbook
+ community.google.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.google.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.google.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.google.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.google.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.google.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def grant_check(module, gs, obj):
+ try:
+ acp = obj.get_acl()
+ if module.params.get('permission') == 'public-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ if not grant:
+ obj.set_acl('public-read')
+                module.exit_json(changed=True, result="The object's permission has been set to public-read")
+ if module.params.get('permission') == 'authenticated-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ if not grant:
+ obj.set_acl('authenticated-read')
+                module.exit_json(changed=True, result="The object's permission has been set to authenticated-read")
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ return True
+
+
+def key_check(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if key_check:
+ grant_check(module, gs, key_check)
+ return True
+ else:
+ return False
+
+
+def keysum(module, gs, bucket, obj):
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+        module.fail_json(msg="Files uploaded with multipart to GCS are not supported for checksum comparison, unable to compute checksum.")
+ return md5_remote
+
+
+def bucket_check(module, gs, bucket):
+ try:
+ result = gs.lookup(bucket)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if result:
+ grant_check(module, gs, result)
+ return True
+ else:
+ return False
+
+
+def create_bucket(module, gs, bucket):
+ try:
+ bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
+ bucket.set_acl(module.params.get('permission'))
+ bucket.configure_versioning(module.params.get('versioning'))
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if bucket:
+ return True
+
+
+def delete_bucket(module, gs, bucket):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket_contents = bucket.list()
+ for key in bucket_contents:
+ bucket.delete_key(key.name)
+ bucket.delete()
+ return True
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_key(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket.delete_key(obj)
+        module.exit_json(msg="Object deleted from bucket.", changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def create_dirkey(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_string('')
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def transform_headers(headers):
+ """
+ Boto url-encodes values unless we convert the value to `str`, so doing
+ this prevents 'max-age=100000' from being converted to "max-age%3D100000".
+
+ :param headers: Headers to convert
+ :type headers: dict
+ :rtype: dict
+
+ """
+
+ for key, value in headers.items():
+ headers[key] = str(value)
+ return headers
+
+
+def upload_gsfile(module, gs, bucket, obj, src, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_filename(
+ filename=src,
+ headers=transform_headers(module.params.get('headers'))
+ )
+ key.set_acl(module.params.get('permission'))
+ url = key.generate_url(expiry)
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsfile(module, gs, bucket, obj, dest):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ key.get_contents_to_filename(dest)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsstr(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ contents = key.get_contents_as_string()
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def get_download_url(module, gs, bucket, obj, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ url = key.generate_url(expiry)
+ module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def handle_get(module, gs, bucket, obj, overwrite, dest):
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(dest)
+ if md5_local == md5_remote:
+ module.exit_json(changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+ else:
+ download_gsfile(module, gs, bucket, obj, dest)
+
+
+def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
+    # Let's check whether the bucket exists to get ground truth.
+ bucket_rc = bucket_check(module, gs, bucket)
+ key_rc = key_check(module, gs, bucket, obj)
+
+    # Let's check the key's state. If it exists, compute the etag md5sum.
+ if bucket_rc and key_rc:
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(src)
+ if md5_local == md5_remote:
+ module.exit_json(msg="Local and remote object are identical", changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+ else:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ if not bucket_rc:
+ create_bucket(module, gs, bucket)
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ # If bucket exists but key doesn't, just upload.
+ if bucket_rc and not key_rc:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+
+def handle_delete(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ if bucket and obj:
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, obj):
+ module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
+ else:
+ module.exit_json(msg="Object does not exist.", changed=False)
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ else:
+ module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
+def handle_create(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ create_dirkey(module, gs, bucket, dirobj)
+ else:
+ create_bucket(module, gs, bucket)
+ create_dirkey(module, gs, bucket, dirobj)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default=False, type='bool')
+ ),
+ )
+
+ if not HAS_BOTO:
+        module.fail_json(msg='boto 2.9+ is required for this module. Try: pip install --upgrade boto')
+
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
+ gs_secret_key = module.params.get('gs_secret_key')
+ gs_access_key = module.params.get('gs_access_key')
+ overwrite = module.params.get('overwrite')
+
+ if mode == 'put':
+        if not src or not obj:
+ module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
+ if mode == 'get':
+        if not dest or not obj:
+ module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
+
+ try:
+ gs = boto.connect_gs(gs_access_key, gs_secret_key)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if mode == 'get':
+ if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
+ module.fail_json(msg="Target bucket/key cannot be found", failed=True)
+ if not path_check(dest):
+ download_gsfile(module, gs, bucket, obj, dest)
+ else:
+ handle_get(module, gs, bucket, obj, overwrite, dest)
+
+ if mode == 'put':
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist", failed=True)
+ handle_put(module, gs, bucket, obj, overwrite, src, expiry)
+
+ # Support for deleting an object if we have both params.
+ if mode == 'delete':
+ handle_delete(module, gs, bucket, obj)
+
+ if mode == 'create':
+ handle_create(module, gs, bucket, obj)
+
+ if mode == 'get_url':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ get_download_url(module, gs, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+ # --------------------------- Get the String contents of an Object -------------------------
+ if mode == 'get_str':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ download_gsstr(module, gs, bucket, obj)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py
new file mode 100644
index 00000000..a9ad45e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.google.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.google.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+    name: my-regional-ip
+ region: us-east1
+ state: present
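+
+# A minimal extra sketch (not from the upstream examples): releasing the
+# same regional address again.
+- name: Delete the Regional external IP address
+  community.google.gce_eip:
+    service_account_email: "{{ service_account_email }}"
+    credentials_file: "{{ credentials_file }}"
+    project_id: "{{ project_id }}"
+    name: my-regional-ip
+    region: us-east1
+    state: absent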
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+  description: The region the address belongs to.
+ returned: always
+ type: str
+ sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+ """
+ Get an Address from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Address.
+ :type name: ``str``
+
+ :return: A GCEAddress object or None.
+ :rtype: :class: `GCEAddress` or None
+ """
+ try:
+ return gce.ex_get_address(name=name, region=region)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_address(gce, params):
+ """
+ Create a new Address.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+
+ address = gce.ex_create_address(
+ name=params['name'], region=params['region'])
+
+ if address:
+ changed = True
+ return_data = address.address
+
+ return (changed, return_data)
+
+
+def delete_address(address):
+ """
+ Delete an Address.
+
+    :param address: The GCEAddress object to destroy.
+    :type address: :class: `GCEAddress`
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+ if address.destroy():
+ changed = True
+ return_data = address.address
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ region=dict(required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+            msg='libcloud with GCE support (0.19.0+) required for this module.')
+
+ gce = gcp_connect(module, Provider.GCE, get_driver,
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['name'] = module.params.get('name')
+ params['region'] = module.params.get('region')
+
+ changed = False
+ json_output = {'state': params['state']}
+ address = get_address(gce, params['name'], region=params['region'])
+
+ if params['state'] == 'absent':
+ if not address:
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown address: %s" %
+ (params['name']))
+ else:
+ # Delete
+ (changed, json_output['address']) = delete_address(address)
+ else:
+ if not address:
+ # Create
+ (changed, json_output['address']) = create_address(gce,
+ params)
+ else:
+ changed = False
+ json_output['address'] = address.address
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py
new file mode 100644
index 00000000..9e53e1e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+  - This module can create and delete GCE private images from a gzipped
+    compressed tarball containing raw disk data or from existing detached
+    disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.google.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.google.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.google.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.google.gce_img:
+ name: test-image
+ state: absent
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
+ volume = source.replace('gs://', GCS_URI)
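+        # e.g. gs://my-bucket/path/to/image.tgz becomes
+        # https://storage.googleapis.com/my-bucket/path/to/image.tgz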
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
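+    # ex_create_image() is driven through the shared connection object, so
+    # temporarily widen the connection-level timeout for the (potentially
+    # slow) image build; the finally block restores the original value.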
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py
new file mode 100644
index 00000000..ec436b42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy Google Compute Engine instance templates
+description:
+  - Creates or destroys Google Compute Engine instance templates
+    on Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+ - The image to use to create the instance.
+        Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+ used. If C(None), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+      - Support passing in the GCE-specific
+        formatted disks[] structure. Case sensitive.
+        See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+      - Region the subnetwork resides in. Required when I(subnetwork) is specified.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.google.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.google.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.google.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.google.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
+def get_info(inst):
+ """Retrieves instance template information
+ """
+    return {
+        'name': inst.name,
+        'extra': inst.extra,
+    }
+
+
+def create_instance_template(module, gce):
+ """Create an instance template
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ # get info from module
+ name = module.params.get('name')
+ size = module.params.get('size')
+ source = module.params.get('source')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ disk_type = module.params.get('disk_type')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ subnetwork_region = module.params.get('subnetwork_region')
+ can_ip_forward = module.params.get('can_ip_forward')
+ external_ip = module.params.get('external_ip')
+ service_account_permissions = module.params.get(
+ 'service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+ on_host_maintenance = module.params.get('on_host_maintenance')
+ automatic_restart = module.params.get('automatic_restart')
+ preemptible = module.params.get('preemptible')
+ tags = module.params.get('tags')
+ metadata = module.params.get('metadata')
+ description = module.params.get('description')
+ disks_gce_struct = module.params.get('disks_gce_struct')
+ changed = False
+
+ # args of ex_create_instancetemplate
+ gce_args = dict(
+ name="instance",
+ size="f1-micro",
+ source=None,
+ image=None,
+ disk_type='pd-standard',
+ disk_auto_delete=True,
+ network='default',
+ subnetwork=None,
+ can_ip_forward=None,
+ external_ip='ephemeral',
+ service_accounts=None,
+ on_host_maintenance=None,
+ automatic_restart=None,
+ preemptible=None,
+ tags=None,
+ metadata=None,
+ description=None,
+ disks_gce_struct=None,
+ nic_gce_struct=None
+ )
+
+ gce_args['name'] = name
+ gce_args['size'] = size
+
+ if source is not None:
+ gce_args['source'] = source
+
+ if image:
+ gce_args['image'] = image
+ else:
+ if image_family:
+ image = gce.ex_get_image_from_family(image_family)
+ gce_args['image'] = image
+ else:
+ gce_args['image'] = "debian-8"
+
+ gce_args['disk_type'] = disk_type
+ gce_args['disk_auto_delete'] = disk_auto_delete
+
+ gce_network = gce.ex_get_network(network)
+ gce_args['network'] = gce_network
+
+ if subnetwork is not None:
+ gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
+
+ if can_ip_forward is not None:
+ gce_args['can_ip_forward'] = can_ip_forward
+
+ if external_ip == "ephemeral":
+ instance_external_ip = external_ip
+ elif external_ip == "none":
+ instance_external_ip = None
+ else:
+ try:
+ instance_external_ip = gce.ex_get_address(external_ip)
+ except GoogleBaseError as err:
+            # Address lookup failed; fall back to passing the value through
+            # as a literal IP or the name of an address.
+ instance_external_ip = external_ip
+ gce_args['external_ip'] = instance_external_ip
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP:
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email is not None:
+ ex_sa_perms.append({'email': str(service_account_email)})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+ gce_args['service_accounts'] = ex_sa_perms
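+        # e.g. service_account_permissions=['storage-ro', 'logging-write']
+        # with no explicit email yields
+        # [{'email': 'default', 'scopes': ['storage-ro', 'logging-write']}].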
+
+ if on_host_maintenance is not None:
+ gce_args['on_host_maintenance'] = on_host_maintenance
+
+ if automatic_restart is not None:
+ gce_args['automatic_restart'] = automatic_restart
+
+ if preemptible is not None:
+ gce_args['preemptible'] = preemptible
+
+ if tags is not None:
+ gce_args['tags'] = tags
+
+ if disks_gce_struct is not None:
+ gce_args['disks_gce_struct'] = disks_gce_struct
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
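+    # For example, metadata "{'env': 'prod', 'tier': 'web'}" parses to a dict
+    # and, on libcloud < 0.15, is rebuilt as
+    # {'items': [{'key': 'env', 'value': 'prod'}, {'key': 'tier', 'value': 'web'}]};
+    # newer libcloud versions accept the plain dict unchanged.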
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+ gce_args['metadata'] = metadata
+
+ if description is not None:
+ gce_args['description'] = description
+
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ except ResourceNotFoundError:
+ try:
+ instance = gce.ex_create_instancetemplate(**gce_args)
+ changed = True
+ except GoogleBaseError as err:
+ module.fail_json(
+ msg='Unexpected error attempting to create instance {0}, error: {1}'
+ .format(
+ instance,
+ err.value
+ )
+ )
+
+ if instance:
+ json_data = get_info(instance)
+ else:
+ module.fail_json(msg="no instance template!")
+
+ return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+ """ Delete instance template.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ name = module.params.get('name')
+ current_state = "absent"
+    changed = False
+    json_data = {}
+
+ # get instance template
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ current_state = "present"
+ except GoogleBaseError as e:
+ json_data = dict(msg='instance template not exists: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ if current_state == "present":
+ rc = instance.destroy()
+ if rc:
+ changed = True
+ else:
+ module.fail_json(
+ msg='instance template destroy failed'
+ )
+
+    return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+ ''' Control module state parameter.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ nothing
+ Exit:
+ AnsibleModule object exit with json data.
+ '''
+ json_output = dict()
+ state = module.params.get("state")
+ if state == "present":
+ (changed, output, name) = create_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+ elif state == "absent":
+ (changed, output, name) = delete_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+
+ module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Report whether applying the module would change the system state.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+        tuple of (changed, human-readable message)
+ '''
+ changed = False
+ current_state = "absent"
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        current_state = "absent"
+    except GoogleBaseError as e:
+        module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+ if current_state != state:
+ changed = True
+
+ if current_state == "absent":
+ if changed:
+ output = 'instance template {0} will be created'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+ if current_state == "present":
+ if changed:
+ output = 'instance template {0} will be destroyed'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+
+ return (changed, output)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ name=dict(required=True, aliases=['base_name']),
+ size=dict(default='f1-micro'),
+ source=dict(),
+ image=dict(),
+ image_family=dict(default='debian-8'),
+ disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+ disk_auto_delete=dict(type='bool', default=True),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ can_ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ automatic_restart=dict(type='bool', default=None),
+ preemptible=dict(type='bool', default=None),
+ tags=dict(type='list'),
+ metadata=dict(),
+ description=dict(),
+ disks=dict(type='list'),
+ nic_gce_struct=dict(type='list'),
+ project_id=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ subnetwork_region=dict(),
+ disks_gce_struct=dict(type='list')
+ ),
+ mutually_exclusive=[['source', 'image']],
+ required_one_of=[['image', 'image_family']],
+ supports_check_mode=True
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ try:
+ gce = gce_connect(module)
+ except GoogleBaseError as e:
+ module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
+
+ if module.check_mode:
+ (changed, output) = check_if_system_state_would_be_changed(module, gce)
+ module.exit_json(
+ changed=changed,
+ msg=output
+ )
+ else:
+ module_controller(module, gce)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py
new file mode 100644
index 00000000..3eed2df2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for
+ the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+      - A dictionary of labels (key/value pairs) to add or remove for the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.google.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+ description: List of labels that exist on the resource.
+ returned: Always.
+ type: dict
+  sample: { 'webserver-frontend': 'homepage', 'environment': 'test', 'experiment-name': 'kennedy' }
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
+def _fetch_resource(client, module):
+ params = module.params
+ if params['resource_url']:
+ if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
+ module.fail_json(
+ msg='Invalid self_link url: %s' % params['resource_url'])
+ else:
+ parts = params['resource_url'].split('/')[8:]
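+            # e.g. '.../compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+            # splits (from element 8) into ['us-central1-f', 'instances', 'example-instance'],
+            # while a global resource such as '.../projects/myproject/global/images/my-image'
+            # yields just ['images', 'my-image'].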
+ if len(parts) == 2:
+ resource_type, resource_name = parts
+ resource_location = 'global'
+ else:
+ resource_location, resource_type, resource_name = parts
+ else:
+ if not params['resource_type'] or not params['resource_location'] \
+ or not params['resource_name']:
+ module.fail_json(msg='Missing required resource params.')
+ resource_type = params['resource_type'].lower()
+ resource_name = params['resource_name'].lower()
+ resource_location = params['resource_location'].lower()
+
+ if resource_type not in KNOWN_RESOURCES:
+ module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if resource_type == 'instances':
+ resource = client.instances().get(project=params['project_id'],
+ zone=resource_location,
+ instance=resource_name).execute()
+ elif resource_type == 'disks':
+ resource = client.disks().get(project=params['project_id'],
+ zone=resource_location,
+ disk=resource_name).execute()
+ elif resource_type == 'snapshots':
+ resource = client.snapshots().get(project=params['project_id'],
+ snapshot=resource_name).execute()
+ elif resource_type == 'images':
+ resource = client.images().get(project=params['project_id'],
+ image=resource_name).execute()
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % resource_type)
+
+ return resource.get('labelFingerprint', ''), {
+ 'resource_name': resource.get('name'),
+ 'resource_url': resource.get('selfLink'),
+ 'resource_type': resource_type,
+ 'resource_location': resource_location,
+ 'labels': resource.get('labels', {})
+ }
+
+
+def _set_labels(client, new_labels, module, ri, fingerprint):
+ params = module.params
+ result = err = None
+ labels = {
+ 'labels': new_labels,
+ 'labelFingerprint': fingerprint
+ }
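+    # The labelFingerprint works as an optimistic-locking token: the API
+    # rejects a setLabels() request whose fingerprint no longer matches the
+    # resource, guarding against overwriting a concurrent label change.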
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if ri['resource_type'] == 'instances':
+ req = client.instances().setLabels(project=params['project_id'],
+ instance=ri['resource_name'],
+ zone=ri['resource_location'],
+ body=labels)
+ elif ri['resource_type'] == 'disks':
+ req = client.disks().setLabels(project=params['project_id'],
+ zone=ri['resource_location'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'snapshots':
+ req = client.snapshots().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'images':
+ req = client.images().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
+
+ # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
+ # method to poll for the async request/operation to complete before
+ # returning. However, during 'beta', we are in an odd state where
+ # API requests must be sent to the 'compute/beta' API, but the python
+ # client library only allows for *Operations.get() requests to be
+ # sent to 'compute/v1' API. The response operation is in the 'beta'
+ # API-scope, but the client library cannot find the operation (404).
+ # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ # return result, err
+ result = req.execute()
+ return True, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ labels=dict(required=False, type='dict', default={}),
+ resource_url=dict(required=False, type='str'),
+ resource_name=dict(required=False, type='str'),
+ resource_location=dict(required=False, type='str'),
+ resource_type=dict(required=False, type='str'),
+ project_id=dict()
+ ),
+ required_together=[
+ ['resource_name', 'resource_location', 'resource_type']
+ ],
+ mutually_exclusive=[
+ ['resource_url', 'resource_name'],
+ ['resource_url', 'resource_location'],
+ ['resource_url', 'resource_type']
+ ]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ client, cparams = get_google_api_client(module, 'compute',
+ user_agent_product=UA_PRODUCT,
+ user_agent_version=UA_VERSION,
+ api_version=GCE_API_VERSION)
+
+ # Get current resource info including labelFingerprint
+ fingerprint, resource_info = _fetch_resource(client, module)
+ new_labels = resource_info['labels'].copy()
+
+ update_needed = False
+ if module.params['state'] == 'absent':
+ for k, v in module.params['labels'].items():
+ if k in new_labels:
+ if new_labels[k] == v:
+ update_needed = True
+ new_labels.pop(k, None)
+ else:
+ module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
+ else:
+ for k, v in module.params['labels'].items():
+ if k not in new_labels:
+ update_needed = True
+ new_labels[k] = v
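+        # Note: with state=present only missing keys are added; a key that
+        # already exists keeps its current value even if a different one was
+        # requested.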
+
+ changed = False
+ json_output = {'state': module.params['state']}
+ if update_needed:
+ changed, err = _set_labels(client, new_labels, module, resource_info,
+ fingerprint)
+ json_output['changed'] = changed
+
+ # TODO(erjohnso): probably want to re-fetch the resource to return the
+ # new labelFingerprint, check that desired labels match updated labels.
+ # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
+ # GCPUtils feature to poll for the operation to be complete. For now,
+ # we'll just update the output with what we have from the original
+ # state of the resource.
+ json_output.update(resource_info)
+ json_output.update(module.params)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py
new file mode 100644
index 00000000..ff29b56d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+ the ansible module simplifies the configuration by following the
+ libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+      - the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
+ members:
+ type: list
+ description:
+      - a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ httphealthcheck_name=dict(),
+ httphealthcheck_port=dict(default=80, type='int'),
+ httphealthcheck_path=dict(default='/'),
+ httphealthcheck_interval=dict(default=5, type='int'),
+ httphealthcheck_timeout=dict(default=5, type='int'),
+ httphealthcheck_unhealthy_count=dict(default=2, type='int'),
+ httphealthcheck_healthy_count=dict(default=2, type='int'),
+ httphealthcheck_host=dict(),
+ name=dict(),
+ protocol=dict(default='tcp'),
+ region=dict(),
+ external_ip=dict(),
+ port_range=dict(),
+ members=dict(type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
+
+ gce = gce_connect(module)
+
+ httphealthcheck_name = module.params.get('httphealthcheck_name')
+ httphealthcheck_port = module.params.get('httphealthcheck_port')
+ httphealthcheck_path = module.params.get('httphealthcheck_path')
+ httphealthcheck_interval = module.params.get('httphealthcheck_interval')
+ httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
+ httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
+ httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
+ httphealthcheck_host = module.params.get('httphealthcheck_host')
+ name = module.params.get('name')
+ protocol = module.params.get('protocol')
+ region = module.params.get('region')
+ external_ip = module.params.get('external_ip')
+ port_range = module.params.get('port_range')
+ members = module.params.get('members')
+ state = module.params.get('state')
+
+ try:
+ gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
+ gcelb.connection.user_agent_append("%s/%s" % (
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION))
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ changed = False
+ json_output = {'name': name, 'state': state}
+
+ if not name and not httphealthcheck_name:
+        module.fail_json(msg='Nothing to do, please specify a "name" or "httphealthcheck_name" parameter', changed=False)
+
+ if state in ['active', 'present']:
+ # first, create the httphealthcheck if requested
+ hc = None
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
+ host=httphealthcheck_host, path=httphealthcheck_path,
+ port=httphealthcheck_port,
+ interval=httphealthcheck_interval,
+ timeout=httphealthcheck_timeout,
+ unhealthy_threshold=httphealthcheck_unhealthy_count,
+ healthy_threshold=httphealthcheck_healthy_count)
+ changed = True
+ except ResourceExistsError:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if hc is not None:
+ json_output['httphealthcheck_host'] = hc.extra['host']
+ json_output['httphealthcheck_path'] = hc.path
+ json_output['httphealthcheck_port'] = hc.port
+ json_output['httphealthcheck_interval'] = hc.interval
+ json_output['httphealthcheck_timeout'] = hc.timeout
+ json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
+ json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold
+
+ # create the forwarding rule (and target pool under the hood)
+ lb = None
+ if name:
+ if not region:
+ module.fail_json(msg='Missing required region name',
+ changed=False)
+ nodes = []
+ output_nodes = []
+ json_output['name'] = name
+ # members is a python list of 'zone/inst' strings
+ if members:
+ for node in members:
+ try:
+ zone, node_name = node.split('/')
+ nodes.append(gce.ex_get_node(node_name, zone))
+ output_nodes.append(node)
+ except Exception:
+ # skip nodes that are badly formatted or don't exist
+ pass
+ try:
+ if hc is not None:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_healthchecks=[hc],
+ ex_address=external_ip)
+ else:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_address=external_ip)
+ changed = True
+ except ResourceExistsError:
+ lb = gcelb.get_balancer(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if lb is not None:
+ json_output['members'] = output_nodes
+ json_output['protocol'] = protocol
+ json_output['region'] = region
+ json_output['external_ip'] = lb.ip
+ json_output['port_range'] = lb.port
+ hc_names = []
+ if 'healthchecks' in lb.extra:
+ for hc in lb.extra['healthchecks']:
+ hc_names.append(hc.name)
+ json_output['httphealthchecks'] = hc_names
+
+ if state in ['absent', 'deleted']:
+ # first, delete the load balancer (forwarding rule and target pool)
+ # if specified.
+ if name:
+ json_output['name'] = name
+ try:
+ lb = gcelb.get_balancer(name)
+ gcelb.destroy_balancer(lb)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # destroy the health check if specified
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ gce.ex_destroy_healthcheck(hc)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py
new file mode 100644
index 00000000..fd47167f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+ - Resizing and Recreating VM are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+      - A dictionary of configuration for the autoscaler. C(enabled) (bool), C(name) (str)
+        and C(policy.max_instances) (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ named_ports:
+ type: list
+ description:
+      - Define named ports that backend services can forward data to. Format is a list of
+ name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# The following playbook creates a MIG, recreates its instances, resizes it, and then deletes it.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.google.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if an Autoscaler creation was attempted and succeeded. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False if the update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete was attempted and succeeded.
+ False if the delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False, 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+ :return: Tuple of (True, '') if all fields are valid, otherwise
+ (False, error message).
+ :rtype: ``(``bool``, ``str``)``
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
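+# A minimal usage sketch (illustrative only, not part of the module flow):
+# the validators below pass module params plus a field spec and fail_json on
+# a False result. Values shown are hypothetical.
+#
+#   ok, msg = _check_params({'size': 3},
+#                           [{'name': 'size', 'required': True, 'type': int}])
+#   # -> (True, '')
+#   ok, msg = _check_params({'size': 'three'},
+#                           [{'name': 'size', 'required': True, 'type': int}])
+#   # -> (False, "size must be of type: ...")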
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+ :param params: Ansible dictionary containing named_ports configuration
+ It is expected that the named_ports config will be found at the
+ key 'named_ports'. That key should contain a list of
+ {name : port} dictionaries.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
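+# Illustrative sketch of the response shape this helper filters (abbreviated,
+# hypothetical values): list_managed_instances() yields dicts such as
+#   [{'name': 'mig-abcd', 'currentAction': 'NONE'},
+#    {'name': 'mig-efgh', 'currentAction': 'CREATING'}]
+# so _get_instance_list(mig, filter_list=['CREATING']) -> ['mig-efgh'].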
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
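+# Example mapping (illustrative, hypothetical values): playbook keys translate
+# to the GCE policy field names used by the API, e.g.
+#   _gen_gce_as_policy({'max_instances': 5, 'min_instances': 2,
+#                       'cool_down_period': 37,
+#                       'cpu_utilization': {'target': 0.39}})
+#   -> {'maxNumReplicas': 5, 'minNumReplicas': 2, 'coolDownPeriodSec': 37,
+#       'cpuUtilization': {'utilizationTarget': 0.39}}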
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect the MIG.
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :return: True if the Autoscaler was deleted, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+ If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+ Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+ If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+ Sort the existing and new named ports. If they differ, update them.
+ This also implicitly allows for the removal of named_ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param named_ports: list of dictionaries in the format of {'name': str, 'port': int}
+ :type named_ports: ``list`` of ``dict``
+
+ :return: True if the named ports were updated, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
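+# Comparison sketch (illustrative): ports are compared after sorting by name,
+# so ordering in the playbook does not matter. For example, existing ports
+#   [{'name': 'http', 'port': 80}, {'name': 'foo', 'port': 82}]
+# and requested ports
+#   [{'name': 'foo', 'port': 82}, {'name': 'http', 'port': 80}]
+# sort equal, so no update call is made.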
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler for MIG %s \
+ in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to set \
+ named ports in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+ # Note: multiple autoscalers can be associated to a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+ in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+ # If we're going to update a MIG, we need a size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s\
+ in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py
new file mode 100644
index 00000000..48395f2a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+ - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25)).
+ This parameter is mandatory when creating or updating a firewall rule.
+ ipv4_range:
+ type: str
+ description:
+ - the IPv4 address range in CIDR notation for the network.
+ This parameter is not required when I(name) refers to an existing network,
+ but it is required when creating a new network.
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+ - network mode for Google Cloud.
+ C(legacy) indicates a network with an IP address range;
+ C(auto) automatically generates subnetworks in different regions;
+ C(custom) uses networks to group subnets of user-specified IP address ranges.
+ See U(https://cloud.google.com/compute/docs/networking#network_types) for details.
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.google.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.google.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.google.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.google.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.google.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.google.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+ description: Instance Tags firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+ description: Instance Tags with these tags receive traffic allowed by firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def format_allowed_section(allowed):
+ """Format each section of the allowed list"""
+ if allowed.count(":") == 0:
+ protocol = allowed
+ ports = []
+ elif allowed.count(":") == 1:
+ protocol, ports = allowed.split(":")
+ else:
+ return []
+ if ports.count(","):
+ ports = ports.split(",")
+ elif ports:
+ ports = [ports]
+ return_val = {"IPProtocol": protocol}
+ if ports:
+ return_val["ports"] = ports
+ return return_val
+
+
+def format_allowed(allowed):
+ """Format the 'allowed' value so that it is GCE compatible."""
+ return_value = []
+ if allowed.count(";") == 0:
+ return [format_allowed_section(allowed)]
+ else:
+ sections = allowed.split(";")
+ for section in sections:
+ return_value.append(format_allowed_section(section))
+ return return_value
+
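+# Illustrative parse (hypothetical input): the 'allowed' string is split on
+# ';' into sections and on ':' into protocol/ports, e.g.
+#   format_allowed('tcp:80,443;icmp')
+#   -> [{'IPProtocol': 'tcp', 'ports': ['80', '443']}, {'IPProtocol': 'icmp'}]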
+
+def sorted_allowed_list(allowed_list):
+ """Sort allowed_list (output of format_allowed) by protocol and port."""
+ # sort by protocol
+ allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol'])
+ # sort the ports list
+ return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', [])))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ allowed=dict(),
+ ipv4_range=dict(),
+ fwname=dict(),
+ name=dict(),
+ src_range=dict(default=[], type='list'),
+ src_tags=dict(default=[], type='list'),
+ target_tags=dict(default=[], type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
+ subnet_name=dict(),
+ subnet_region=dict(),
+ subnet_desc=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ allowed = module.params.get('allowed')
+ ipv4_range = module.params.get('ipv4_range')
+ fwname = module.params.get('fwname')
+ name = module.params.get('name')
+ src_range = module.params.get('src_range')
+ src_tags = module.params.get('src_tags')
+ target_tags = module.params.get('target_tags')
+ state = module.params.get('state')
+ mode = module.params.get('mode')
+ subnet_name = module.params.get('subnet_name')
+ subnet_region = module.params.get('subnet_region')
+ subnet_desc = module.params.get('subnet_desc')
+
+ changed = False
+ json_output = {'state': state}
+
+ if state in ['active', 'present']:
+ network = None
+ subnet = None
+ try:
+ network = gce.ex_get_network(name)
+ json_output['name'] = name
+ if mode == 'legacy':
+ json_output['ipv4_range'] = network.cidr
+ if network and mode == 'custom' and subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = subnet.cidr
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants to create a new network that doesn't yet exist
+ if name and not network:
+ if not ipv4_range and mode != 'auto':
+ module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
+ changed=False)
+ args = [ipv4_range if mode == 'legacy' else None]
+ kwargs = {}
+ if mode != 'legacy':
+ kwargs['mode'] = mode
+
+ try:
+ network = gce.ex_create_network(name, *args, **kwargs)
+ json_output['name'] = name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except TypeError:
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
+ if not hasattr(gce, 'ex_create_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ if not subnet_name or not ipv4_range or not subnet_region:
+ module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
+
+ try:
+ subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=changed)
+
+ if fwname:
+ # user creating a firewall rule
+ if not allowed and not src_range and not src_tags:
+ if changed and network:
+ module.fail_json(
+ msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
+ module.fail_json(
+ msg="Missing required firewall rule parameter(s)",
+ changed=False)
+
+ allowed_list = format_allowed(allowed)
+
+ # Fetch existing rule and if it exists, compare attributes
+ # update if attributes changed. Create if doesn't exist.
+ try:
+ fw_changed = False
+ fw = gce.ex_get_firewall(fwname)
+
+ # If old and new attributes are different, we update the firewall rule.
+ # This implicitly lets us clear out attributes as well.
+ # allowed_list is required and must not be None for firewall rules.
+ if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
+ fw.allowed = allowed_list
+ fw_changed = True
+
+ # source_ranges might not be set in the project; cast it to an empty list
+ fw.source_ranges = fw.source_ranges or []
+
+ # If these attributes are lists, we sort them first, then compare.
+ # Otherwise, we update if they differ.
+ if fw.source_ranges != src_range:
+ if isinstance(src_range, list):
+ if sorted(fw.source_ranges) != sorted(src_range):
+ fw.source_ranges = src_range
+ fw_changed = True
+ else:
+ fw.source_ranges = src_range
+ fw_changed = True
+
+ # source_tags might not be set in the project; cast it to an empty list
+ fw.source_tags = fw.source_tags or []
+
+ if fw.source_tags != src_tags:
+ if isinstance(src_tags, list):
+ if sorted(fw.source_tags) != sorted(src_tags):
+ fw.source_tags = src_tags
+ fw_changed = True
+ else:
+ fw.source_tags = src_tags
+ fw_changed = True
+
+ # target_tags might not be set in the project; cast it to an empty list
+ fw.target_tags = fw.target_tags or []
+
+ if fw.target_tags != target_tags:
+ if isinstance(target_tags, list):
+ if sorted(fw.target_tags) != sorted(target_tags):
+ fw.target_tags = target_tags
+ fw_changed = True
+ else:
+ fw.target_tags = target_tags
+ fw_changed = True
+
+ if fw_changed is True:
+ try:
+ gce.ex_update_firewall(fw)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # Firewall rule not found so we try to create it.
+ except ResourceNotFoundError:
+ try:
+ gce.ex_create_firewall(fwname, allowed_list, network=name,
+ source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['fwname'] = fwname
+ json_output['allowed'] = allowed
+ json_output['src_range'] = src_range
+ json_output['src_tags'] = src_tags
+ json_output['target_tags'] = target_tags
+
+ if state in ['absent', 'deleted']:
+ if fwname:
+ json_output['fwname'] = fwname
+ fw = None
+ try:
+ fw = gce.ex_get_firewall(fwname)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if fw:
+ gce.ex_destroy_firewall(fw)
+ changed = True
+ elif subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ json_output['name'] = subnet_name
+ subnet = None
+ try:
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if subnet:
+ gce.ex_destroy_subnetwork(subnet)
+ changed = True
+ elif name:
+ json_output['name'] = name
+ network = None
+ try:
+ network = gce.ex_get_network(name)
+
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if network:
+ try:
+ gce.ex_destroy_network(network)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py
new file mode 100644
index 00000000..0e3093d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+ - If C(yes), deletes the volume when the instance is terminated.
+ type: bool
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the disk.
+ If I(image) has been used, I(image_family) is ignored.
+ Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ delete_on_termination=dict(type='bool'),
+ detach_only=dict(type='bool'),
+ instance_name=dict(),
+ mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
+ name=dict(required=True),
+ size_gb=dict(default=10),
+ disk_type=dict(default='pd-standard'),
+ image=dict(),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ snapshot=dict(),
+ state=dict(default='present'),
+ zone=dict(default='us-central1-b'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ detach_only = module.params.get('detach_only')
+ instance_name = module.params.get('instance_name')
+ mode = module.params.get('mode')
+ name = module.params.get('name')
+ size_gb = module.params.get('size_gb')
+ disk_type = module.params.get('disk_type')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+
+ if delete_on_termination and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when requesting delete on termination',
+ changed=False)
+
+ if detach_only and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when detaching a disk',
+ changed=False)
+
+ disk = inst = None
+ changed = is_attached = False
+
+ json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
+ if detach_only:
+ json_output['detach_only'] = True
+ json_output['detached_from_instance'] = instance_name
+
+ if instance_name:
+ # user wants to attach/detach from an existing instance
+ try:
+ inst = gce.ex_get_node(instance_name, zone)
+ # is the disk attached?
+ for d in inst.extra['disks']:
+ if d['deviceName'] == name:
+ is_attached = True
+ json_output['attached_mode'] = d['mode']
+ json_output['attached_to_instance'] = inst.name
+ except Exception:
+ pass
+
+ # find disk if it already exists
+ try:
+ disk = gce.ex_get_volume(name)
+ json_output['size_gb'] = int(disk.size)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants a disk to exist. If "instance_name" is supplied the user
+ # also wants it attached
+ if state in ['active', 'present']:
+
+ if not size_gb:
+ module.fail_json(msg="Must supply a size_gb", changed=False)
+ try:
+ size_gb = int(round(float(size_gb)))
+ if size_gb < 1:
+ raise Exception
+ except Exception:
+ module.fail_json(msg="Must supply a size_gb larger than 1 GB",
+ changed=False)
+
+ if instance_name and inst is None:
+ module.fail_json(msg='Instance %s does not exist in zone %s' % (
+ instance_name, zone), changed=False)
+
+ if not disk:
+ if image is not None and snapshot is not None:
+ module.fail_json(
+ msg='Cannot give both image (%s) and snapshot (%s)' % (
+ image, snapshot), changed=False)
+ lc_image = None
+ lc_snapshot = None
+ if image_family is not None:
+ lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
+ elif image is not None:
+ lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
+ elif snapshot is not None:
+ lc_snapshot = gce.ex_get_snapshot(snapshot)
+ try:
+ disk = gce.create_volume(
+ size_gb, name, location=zone, image=lc_image,
+ snapshot=lc_snapshot, ex_disk_type=disk_type)
+ except ResourceExistsError:
+ pass
+ except QuotaExceededError:
+ module.fail_json(msg='Requested disk size exceeds quota',
+ changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['size_gb'] = size_gb
+ if image is not None:
+ json_output['image'] = image
+ if snapshot is not None:
+ json_output['snapshot'] = snapshot
+ changed = True
+ if inst and not is_attached:
+ try:
+ gce.attach_volume(inst, disk, device=name, ex_mode=mode,
+ ex_auto_delete=delete_on_termination)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['attached_to_instance'] = inst.name
+ json_output['attached_mode'] = mode
+ if delete_on_termination:
+ json_output['delete_on_termination'] = True
+ changed = True
+
+ # user wants to delete a disk (or perhaps just detach it).
+ if state in ['absent', 'deleted'] and disk:
+
+ if inst and is_attached:
+ try:
+ gce.detach_volume(disk, ex_node=inst)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+ if not detach_only:
+ try:
+ gce.destroy_volume(disk)
+ except ResourceInUseError as e:
+ module.fail_json(msg=str(e.value), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py
new file mode 100644
index 00000000..6723f464
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+ - Manages snapshots for GCE instances. This module manages snapshots for
+ the storage volumes of a GCE compute instance. If there are multiple
+ volumes, each snapshot name will be prefixed with the disk name.
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.google.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py
new file mode 100644
index 00000000..4af31863
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+ to/from GCE instances. Use C(instance_pattern) to update multiple instances in a specified zone.
+options:
+ instance_name:
+ type: str
+ description:
+ - The name of the GCE instance to add/remove tags.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+ - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+ - The zone of the disk specified by source.
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+ - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.google.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.google.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.google.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
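+
+# instance_pattern is matched with Python's re.search(); an anchored pattern
+# avoids accidental substring matches. The instance names below are hypothetical.
+- name: Remove tags from instances whose names match an anchored regex
+  community.google.gce_tag:
+    instance_pattern: "^test-server-\\d+$"
+    tags: foo,bar
+    zone: us-central1-a
+    state: absent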
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
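+    # GCE tags must be lowercase, so normalize user-supplied tags before comparing.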
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+        # tags changed are any in the new list that weren't in existing
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py
new file mode 100644
index 00000000..2d9230c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+      - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+        For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately (bool) are available as subfields.
+        See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+          - Subfield of subscription. Not required. If specified, messages will be retrieved from the topic via the
+            provided subscription name. max_messages (int; default None; maximum number of messages to pull),
+            message_ack (bool; default False; acknowledge the message) and return_immediately
+            (bool; default False; don't wait for messages to appear) are available as subfields. If the messages
+            are acknowledged, changed is set to True; otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+      - If subscription is specified, we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+# Messages will be pushed; there is no check to see if the message was published before.
+- name: Create a topic and publish a message to it
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+# Setting state to absent would keep the messages from being sent.
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.google.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+      mykey2: myvalue2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+ state: present
+
+# Pull is the default delivery mechanism; ack_deadline is not required.
+- name: Create Subscription with ack_deadline and push endpoint
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+      ack_deadline: "60"
+      push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+# Topic will not be deleted.
+- name: Delete subscription
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+ state: absent
+
+# Only the pull subfield is required to retrieve messages.
+- name: Pull messages from subscription
+ community.google.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+from ansible_collections.community.google.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
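+    # topic.batch() queues messages locally and publishes them in a single API
+    # call when the context manager exits.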
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+            # Encode to bytes explicitly so publishing works on Python 2 and 3.
+            batch.publish(to_bytes(msg), **attrs)
+ return True
+
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+    # message_ack defaults to False so messages are not acknowledged unless requested.
+    message_ack = pull_params.get('message_ack', False)
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py
new file mode 100644
index 00000000..1feac1e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/plugins/modules/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+  - The list state enables the user to list topics or subscriptions in the project. See the examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+      - Choices are 'topics' or 'subscriptions'.
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.google.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.google.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.google.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.google.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+        project_id=dict(),
+    ))
+ if module._name in ('gcpubsub_facts', 'community.google.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.google') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.ini b/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.ini
new file mode 100644
index 00000000..af27a9c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.ini
@@ -0,0 +1,76 @@
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# The GCE inventory script has the following dependencies:
+# 1. A valid Google Cloud Platform account with Google Compute Engine
+# enabled. See https://cloud.google.com
+# 2. An OAuth2 Service Account flow should be enabled. This will generate
+# a private key file that the inventory script will use for API request
+# authorization. See https://developers.google.com/accounts/docs/OAuth2
+# 3. Convert the private key from PKCS12 to PEM format
+# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
+# > -nodes -nocerts | openssl rsa -out pkey.pem
+# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
+#
+# (See ansible/test/gce_tests.py comments for full install instructions)
+#
+# Author: Eric Johnson <erjohnso@google.com>
+# Contributors: John Roach <johnroach1985@gmail.com>
+
+[gce]
+# GCE Service Account configuration information can be stored in the
+# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
+# exist in your PYTHONPATH and be picked up automatically with an import
+# statement in the inventory script. However, you can specify an absolute
+# path to the secrets.py file with 'libcloud_secrets' parameter.
+# This option will be deprecated in a future release.
+libcloud_secrets =
+
+# If you are not going to use a 'secrets.py' file, you can set the necessary
+# authorization parameters here.
+# You can add multiple gce projects by using a comma separated list. Make
+# sure that the service account used has permissions on said projects.
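+# example (hypothetical project IDs): gce_project_id = project-one,project-two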
+gce_service_account_email_address =
+gce_service_account_pem_file_path =
+gce_project_id =
+gce_zone =
+
+# Filter inventory based on state. Leave undefined to return instances regardless of state.
+# example: Uncomment to only return inventory in the running or provisioning state
+#instance_states = RUNNING,PROVISIONING
+
+# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
+# example: Uncomment to only return inventory with the http-server or https-server tag
+#instance_tags = http-server,https-server
+
+
+[inventory]
+# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
+# contain the instance internal or external address. Values may be either
+# 'internal' or 'external'. If 'external' is specified but no external instance
+# address exists, the internal address will be used.
+# The INVENTORY_IP_TYPE environment variable will override this value.
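+# example: inventory_ip_type = internal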
+inventory_ip_type =
+
+[cache]
+# directory in which cache should be created
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
diff --git a/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.py b/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.py
new file mode 100644
index 00000000..05a93f48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/scripts/inventory/gce.py
@@ -0,0 +1,524 @@
+#!/usr/bin/env python
+
+# Copyright: (c) 2013, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+ - gce_subnetwork
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+    used when creating the instance (e.g. debian-7-wheezy-v20130816). When
+    your instance was created with a root persistent disk, it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
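+
+  Filter the inventory by instance tag (the tag value here is hypothetical)
+  $ GCE_INSTANCE_TAGS=http-server ./gce.py --list --pretty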
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
+ John Roach <johnroach1985@gmail.com>
+Version: 0.0.4
+'''
+
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION = "v2"
+
+import sys
+import os
+import argparse
+
+from time import time
+
+from ansible.module_utils.six.moves import configparser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+import json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ _ = Provider.GCE
+except Exception:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+ ''' Determines if the cache files have expired, or if it is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.drivers = self.get_gce_drivers()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a ConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = configparser.ConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'gce_zone': '',
+ 'libcloud_secrets': '',
+ 'instance_tags': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Set the instance_tags filter, env var overrides config from file
+ # and cli param overrides all
+ if self.args.instance_tags:
+ self.instance_tags = self.args.instance_tags
+ else:
+ self.instance_tags = os.environ.get(
+ 'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
+ if self.instance_tags:
+ self.instance_tags = self.instance_tags.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+        # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_drivers(self):
+ """Determine the GCE authorization settings and return a list of
+ libcloud drivers.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+
+ try:
+ import secrets
+ args = list(secrets.GCE_PARAMS)
+ kwargs = secrets.GCE_KEYWORD_PARAMS
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found:
+ args = [
+ self.config.get('gce', 'gce_service_account_email_address'),
+ self.config.get('gce', 'gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id'),
+ 'datacenter': self.config.get('gce', 'gce_zone')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
+
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+ kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
+
+ gce_drivers = []
+ projects = kwargs['project'].split(',')
+ for project in projects:
+ kwargs['project'] = project
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ gce_drivers.append(gce)
+ return gce_drivers
+
+ def parse_env_zones(self):
+ '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this will be used to filter the results of the grouped_instances call'''
+ import csv
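+        # The csv reader handles quoting and spaces after commas in GCE_ZONE.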
+ reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--instance-tags', action='store',
+                            help='Only include instances with these tags, separated by commas')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if 'items' in inst.extra['metadata']:
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ subnet = None
+ if 'subnetwork' in inst.extra['networkInterfaces'][0]:
+ subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+        # default to external IP unless user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ 'gce_subnetwork': subnet,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': ssh_host
+ }
+
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
+ try:
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
+ except Exception as e:
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
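+        # libcloud stores the pageToken for the next page back in the shared
+        # gce_params dict, so keep looping until no pageToken remains.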
+ while more_results:
+ for driver in self.drivers:
+ driver.connection.gce_params = params
+ all_nodes.extend(driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+            if self.instance_states and node.extra['status'] not in self.instance_states:
+ continue
+
+ # This check filters on the desired instance tags defined in the
+ # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
+ # or as the cli param --instance-tags.
+ #
+ # If the instance_tags list is _empty_ then _ALL_ instances are returned.
+ #
+ # If the instance_tags list is _populated_ then check the current
+ # instance tags against the instance_tags list. If the instance has
+ # at least one tag from the instance_tags list, it is returned.
+ if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
+ continue
+
+ name = node.name
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups:
+ groups[zone].append(name)
+ else:
+ groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if tag in groups:
+ groups[tag].append(name)
+ else:
+ groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if net in groups:
+ groups[net].append(name)
+ else:
+ groups[net] = [name]
+
+ machine_type = node.size
+ if machine_type in groups:
+ groups[machine_type].append(name)
+ else:
+ groups[machine_type] = [name]
+
+ image = node.image or 'persistent_disk'
+ if image in groups:
+ groups[image].append(name)
+ else:
+ groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if stat in groups:
+ groups[stat].append(name)
+ else:
+ groups[stat] = [name]
+
+ for private_ip in node.private_ips:
+ groups[private_ip] = [name]
+
+ if len(node.public_ips) >= 1:
+ for public_ip in node.public_ips:
+ groups[public_ip] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+if __name__ == '__main__':
+ GceInventory()
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..c233efa5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,22 @@
+plugins/modules/gce_eip.py pylint:blacklisted-name
+plugins/modules/gce_eip.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_img.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/gce_instance_template.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_labels.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_lb.py pylint:blacklisted-name
+plugins/modules/gce_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_mig.py pylint:blacklisted-name
+plugins/modules/gce_mig.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_net.py pylint:blacklisted-name
+plugins/modules/gce_net.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_pd.py pylint:blacklisted-name
+plugins/modules/gce_pd.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/gce_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_tag.py pylint:blacklisted-name
+plugins/modules/gce_tag.py validate-modules:parameter-list-no-elements
+plugins/modules/gcpubsub.py validate-modules:parameter-list-no-elements
+plugins/modules/gcpubsub_info.py validate-modules:parameter-state-invalid-choice
+scripts/inventory/gce.py pylint:blacklisted-name
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.11.txt
new file mode 100644
index 00000000..c233efa5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,22 @@
+plugins/modules/gce_eip.py pylint:blacklisted-name
+plugins/modules/gce_eip.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_img.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/gce_instance_template.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_labels.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_lb.py pylint:blacklisted-name
+plugins/modules/gce_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_mig.py pylint:blacklisted-name
+plugins/modules/gce_mig.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_net.py pylint:blacklisted-name
+plugins/modules/gce_net.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_pd.py pylint:blacklisted-name
+plugins/modules/gce_pd.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/gce_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/gce_tag.py pylint:blacklisted-name
+plugins/modules/gce_tag.py validate-modules:parameter-list-no-elements
+plugins/modules/gcpubsub.py validate-modules:parameter-list-no-elements
+plugins/modules/gcpubsub_info.py validate-modules:parameter-state-invalid-choice
+scripts/inventory/gce.py pylint:blacklisted-name
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..f2dffaa4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,11 @@
+plugins/modules/gce_eip.py pylint:blacklisted-name
+plugins/modules/gce_img.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/gce_lb.py pylint:blacklisted-name
+plugins/modules/gce_mig.py pylint:blacklisted-name
+plugins/modules/gce_net.py pylint:blacklisted-name
+plugins/modules/gce_pd.py pylint:blacklisted-name
+plugins/modules/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/gce_tag.py pylint:blacklisted-name
+scripts/inventory/gce.py pylint:blacklisted-name
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't have a newline to end the last line, so remove the
+ # newline that our naive format() added
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines` methods of the
+ file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_auth.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_auth.py
new file mode 100644
index 00000000..845234ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_auth.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Tom Melendez (@supertom) <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pytest
+
+from ansible_collections.community.google.tests.unit.compat import mock, unittest
+from ansible_collections.community.google.plugins.module_utils.gcp import (_get_gcp_ansible_credentials, _get_gcp_credentials, _get_gcp_environ_var,
+ _get_gcp_environment_credentials,
+ _validate_credentials_file)
+
+# Fake data/function used for testing
+fake_env_data = {'GCE_EMAIL': 'gce-email'}
+
+
+def fake_get_gcp_environ_var(var_name, default_value):
+ if var_name not in fake_env_data:
+ return default_value
+ else:
+ return fake_env_data[var_name]
+
+# Fake AnsibleModule for use in tests
+
+
+class FakeModule(object):
+ class Params():
+ data = {}
+
+ def get(self, key, alt=None):
+ if key in self.data:
+ return self.data[key]
+ else:
+ return alt
+
+ def __init__(self, data=None):
+ data = {} if data is None else data
+
+ self.params = FakeModule.Params()
+ self.params.data = data
+
+ def fail_json(self, **kwargs):
+ raise ValueError("fail_json")
+
+ def deprecate(self, **kwargs):
+ return None
+
+
+class GCPAuthTestCase(unittest.TestCase):
+ """Tests to verify different Auth mechanisms."""
+
+ def setup_method(self, method):
+ global fake_env_data
+ fake_env_data = {'GCE_EMAIL': 'gce-email'}
+
+ def test_get_gcp_ansible_credentials(self):
+ input_data = {'service_account_email': 'mysa',
+ 'credentials_file': 'path-to-file.json',
+ 'project_id': 'my-cool-project'}
+
+ module = FakeModule(input_data)
+ actual = _get_gcp_ansible_credentials(module)
+ expected = tuple(input_data.values())
+ self.assertEqual(sorted(expected), sorted(actual))
+
+ def test_get_gcp_environ_var(self):
+ # Chose not to mock this so we could really verify that it
+ # works as expected.
+ existing_var_name = 'gcp_ansible_auth_test_54321'
+ non_existing_var_name = 'doesnt_exist_gcp_ansible_auth_test_12345'
+ os.environ[existing_var_name] = 'foobar'
+ self.assertEqual('foobar', _get_gcp_environ_var(
+ existing_var_name, None))
+ del os.environ[existing_var_name]
+ self.assertEqual('default_value', _get_gcp_environ_var(
+ non_existing_var_name, 'default_value'))
+
+ def test_validate_credentials_file(self):
+ # TODO(supertom): Only dealing with p12 here, check the other states
+ # of this function
+ module = FakeModule()
+ with mock.patch('ansible_collections.community.google.plugins.module_utils.gcp.open',
+ mock.mock_open(read_data='foobar'), create=True):
+ # pem condition, warning is suppressed with the return_value
+ credentials_file = '/foopath/pem.pem'
+ with self.assertRaises(ValueError):
+ _validate_credentials_file(module,
+ credentials_file=credentials_file,
+ require_valid_json=False,
+ check_libcloud=False)
+
+ @mock.patch('ansible_collections.community.google.plugins.module_utils.gcp._get_gcp_environ_var',
+ side_effect=fake_get_gcp_environ_var)
+ def test_get_gcp_environment_credentials(self, mockobj):
+ global fake_env_data
+
+ actual = _get_gcp_environment_credentials(None, None, None)
+ expected = tuple(['gce-email', None, None])
+ self.assertEqual(expected, actual)
+
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem'}
+ expected = tuple([None, '/path/to/pem.pem', None])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # pem and creds are set, expect creds
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
+ 'GCE_CREDENTIALS_FILE_PATH': '/path/to/creds.json'}
+        expected = (None, '/path/to/creds.json', None)
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # expect GOOGLE_APPLICATION_CREDENTIALS over PEM
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
+ 'GOOGLE_APPLICATION_CREDENTIALS': '/path/to/appcreds.json'}
+        expected = (None, '/path/to/appcreds.json', None)
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # project tests
+ fake_env_data = {'GCE_PROJECT': 'my-project'}
+        expected = (None, None, 'my-project')
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-cloud-project'}
+        expected = (None, None, 'my-cloud-project')
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+        # credentials passed in as arguments; only the project id is
+        # picked up from the environment
+ fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-project'}
+        expected = ('my-sa-email', '/path/to/creds.json', 'my-project')
+ actual = _get_gcp_environment_credentials(
+ 'my-sa-email', '/path/to/creds.json', None)
+ self.assertEqual(expected, actual)
+
+ @mock.patch('ansible_collections.community.google.plugins.module_utils.gcp._get_gcp_environ_var',
+ side_effect=fake_get_gcp_environ_var)
+ def test_get_gcp_credentials(self, mockobj):
+ global fake_env_data
+
+ fake_env_data = {}
+ module = FakeModule()
+ module.params.data = {}
+ # Nothing is set, calls fail_json
+ with pytest.raises(ValueError):
+ _get_gcp_credentials(module)
+
+ # project_id (only) is set from Ansible params.
+ module.params.data['project_id'] = 'my-project'
+ actual = _get_gcp_credentials(
+ module, require_valid_json=True, check_libcloud=False)
+ expected = {'service_account_email': '',
+ 'project_id': 'my-project',
+ 'credentials_file': ''}
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_utils.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_utils.py
new file mode 100644
index 00000000..7098f705
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/module_utils/test_utils.py
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Tom Melendez <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.google.tests.unit.compat import mock, unittest
+from ansible_collections.community.google.plugins.module_utils.gcp import check_min_pkg_version, GCPUtils, GCPInvalidURLError
+
+
+def build_distribution(version):
+ obj = mock.MagicMock()
+ obj.version = '0.5.0'
+ return obj
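+
+# NOTE: the fake ignores which distribution was requested and always reports
+# version 0.5.0; the assertions in test_check_minimum_pkg_version below are
+# pinned to that value.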
+
+
+class GCPUtilsTestCase(unittest.TestCase):
+ params_dict = {
+ 'url_map_name': 'foo_url_map_name',
+ 'description': 'foo_url_map description',
+ 'host_rules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'path_matcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'path_matchers': [
+ {
+ 'name': 'path_matcher_one',
+ 'description': 'path matcher one',
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'pathRules': [
+ {
+ 'service': 'my-one-bes',
+ 'paths': [
+ '/',
+ '/aboutus'
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'path_matcher_two',
+ 'description': 'path matcher two',
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'pathRules': [
+ {
+ 'service': 'my-two-bes',
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ @mock.patch("pkg_resources.get_distribution", side_effect=build_distribution)
+ def test_check_minimum_pkg_version(self, mockobj):
+ self.assertTrue(check_min_pkg_version('foobar', '0.4.0'))
+ self.assertTrue(check_min_pkg_version('foobar', '0.5.0'))
+ self.assertFalse(check_min_pkg_version('foobar', '0.6.0'))
+
+ def test_parse_gcp_url(self):
+ # region, resource, entity, method
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/regions/us-east1/instanceGroupManagers/my-mig/recreateInstances'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertEqual('us-east1', actual['region'])
+ self.assertEqual('instanceGroupManagers', actual['resource_name'])
+ self.assertEqual('my-mig', actual['entity_name'])
+ self.assertEqual('recreateInstances', actual['method_name'])
+
+ # zone, resource, entity, method
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/zones/us-east1-c/instanceGroupManagers/my-mig/recreateInstances'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertEqual('us-east1-c', actual['zone'])
+ self.assertEqual('instanceGroupManagers', actual['resource_name'])
+ self.assertEqual('my-mig', actual['entity_name'])
+ self.assertEqual('recreateInstances', actual['method_name'])
+
+ # global, resource
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('urlMaps', actual['resource_name'])
+
+ # global, resource, entity
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/my-url-map'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('compute', actual['service'])
+
+ # global URL, resource, entity, method_name
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/mybackendservice/getHealth'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('backendServices', actual['resource_name'])
+ self.assertEqual('mybackendservice', actual['entity_name'])
+ self.assertEqual('getHealth', actual['method_name'])
+
+ # no location in URL
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy/setUrlMap'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
+ self.assertEqual('mytargetproxy', actual['entity_name'])
+ self.assertEqual('setUrlMap', actual['method_name'])
+
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
+ self.assertEqual('mytargetproxy', actual['entity_name'])
+
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
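+
+        # Informally, the URL grammar exercised above is:
+        #   https://www.googleapis.com/<service>/<api_version>/projects/<project>
+        #       [/global | /regions/<region> | /zones/<zone>]
+        #       [/<resource>[/<entity>[/<method>]]]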
+
+ # test exceptions
+ no_projects_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global/backendServices/mybackendservice/getHealth'
+ no_resource_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global'
+
+ no_resource_no_loc_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject'
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_projects_input_url)
+        self.assertIsInstance(cm.exception, GCPInvalidURLError)
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_resource_input_url)
+        self.assertIsInstance(cm.exception, GCPInvalidURLError)
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_resource_no_loc_input_url)
+        self.assertIsInstance(cm.exception, GCPInvalidURLError)
+
+ def test_params_to_gcp_dict(self):
+
+ expected = {
+ 'description': 'foo_url_map description',
+ 'hostRules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'pathMatcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'name': 'foo_url_map_name',
+ 'pathMatchers': [
+ {
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'description': 'path matcher one',
+ 'name': 'path_matcher_one',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/',
+ '/aboutus'
+ ],
+ 'service': 'my-one-bes'
+ }
+ ]
+ },
+ {
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'description': 'path matcher two',
+ 'name': 'path_matcher_two',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ],
+ 'service': 'my-two-bes'
+ }
+ ]
+ }
+ ]
+ }
+
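+        # params_to_gcp_dict converts Ansible-style snake_case keys to the
+        # camelCase the GCP API expects (host_rules -> hostRules,
+        # path_matcher -> pathMatcher) and maps the designated name
+        # parameter ('url_map_name' here) to the API's 'name' field.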
+ actual = GCPUtils.params_to_gcp_dict(self.params_dict, 'url_map_name')
+ self.assertEqual(expected, actual)
+
+ def test_get_gcp_resource_from_methodId(self):
+ input_data = 'compute.urlMaps.list'
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertEqual('urlMaps', actual)
+ input_data = None
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertFalse(actual)
+ input_data = 666
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertFalse(actual)
+
+ def test_get_entity_name_from_resource_name(self):
+ input_data = 'urlMaps'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('urlMap', actual)
+ input_data = 'targetHttpProxies'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('targetHttpProxy', actual)
+ input_data = 'globalForwardingRules'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('forwardingRule', actual)
+ input_data = ''
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+        self.assertIsNone(actual)
+ input_data = 666
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+        self.assertIsNone(actual)
+
+ def test_are_params_equal(self):
+ params1 = {'one': 1}
+ params2 = {'one': 1}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ params1 = {'one': 1}
+ params2 = {'two': 2}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertFalse(actual)
+
+ params1 = {'three': 3, 'two': 2, 'one': 1}
+ params2 = {'one': 1, 'two': 2, 'three': 3}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ params1 = {
+ "creationTimestamp": "2017-04-21T11:19:20.718-07:00",
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
+ "description": "",
+ "fingerprint": "ickr_pwlZPU=",
+ "hostRules": [
+ {
+ "description": "",
+ "hosts": [
+ "*."
+ ],
+ "pathMatcher": "path-matcher-one"
+ }
+ ],
+ "id": "8566395781175047111",
+ "kind": "compute#urlMap",
+ "name": "newtesturlmap-foo",
+ "pathMatchers": [
+ {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
+ "description": "path matcher one",
+ "name": "path-matcher-one",
+ "pathRules": [
+ {
+ "paths": [
+ "/data",
+ "/aboutus"
+ ],
+ "service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
+ }
+ ]
+ }
+ ],
+ "selfLink": "https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/newtesturlmap-foo"
+ }
+ params2 = {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
+ "hostRules": [
+ {
+ "description": "",
+ "hosts": [
+ "*."
+ ],
+ "pathMatcher": "path-matcher-one"
+ }
+ ],
+ "name": "newtesturlmap-foo",
+ "pathMatchers": [
+ {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
+ "description": "path matcher one",
+ "name": "path-matcher-one",
+ "pathRules": [
+ {
+ "paths": [
+ "/data",
+ "/aboutus"
+ ],
+ "service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
+ }
+ ]
+ }
+ ],
+ }
+
+        # params1 carries excluded (server-generated) fields such as id,
+        # kind, selfLink and creationTimestamp; params2 doesn't. The two
+        # should still compare as equal.
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ def test_filter_gcp_fields(self):
+ input_data = {
+ u'kind': u'compute#httpsHealthCheck',
+ u'description': u'',
+ u'timeoutSec': 5,
+ u'checkIntervalSec': 5,
+ u'port': 443,
+ u'healthyThreshold': 2,
+ u'host': u'',
+ u'requestPath': u'/',
+ u'unhealthyThreshold': 2,
+ u'creationTimestamp': u'2017-05-16T15:09:36.546-07:00',
+ u'id': u'8727093129334146639',
+ u'selfLink': u'https://www.googleapis.com/compute/v1/projects/myproject/global/httpsHealthChecks/myhealthcheck',
+ u'name': u'myhealthcheck'}
+
+ expected = {
+ 'name': 'myhealthcheck',
+ 'checkIntervalSec': 5,
+ 'port': 443,
+ 'unhealthyThreshold': 2,
+ 'healthyThreshold': 2,
+ 'host': '',
+ 'timeoutSec': 5,
+ 'requestPath': '/'}
+
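+        # filter_gcp_fields drops server-generated metadata (kind, id,
+        # selfLink, creationTimestamp) plus description, keeping only the
+        # user-settable health-check parameters.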
+ actual = GCPUtils.filter_gcp_fields(input_data)
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/test_gce_tag.py b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/test_gce_tag.py
new file mode 100644
index 00000000..3a06f18d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/plugins/modules/test_gce_tag.py
@@ -0,0 +1,66 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.community.google.plugins.modules.gce_tag import _get_changed_items, _intersect_items, _union_items
+
+
+class TestGCETag(unittest.TestCase):
+ """Unit tests for gce_tag module."""
+
+ def test_union_items(self):
+ """
+        Combine the items from both lists,
+        removing duplicates.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ got = _union_items(listA, listB)
+ self.assertEqual(want, got)
+
+ def test_intersect_items(self):
+ """
+        Only the items present in both lists.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [1, 2, 3, 4, 5]
+ got = _intersect_items(listA, listB)
+ self.assertEqual(want, got)
+
+        # simulate tag removal: only tags actually present get removed
+ new_tags = ['one', 'two']
+ existing_tags = ['two']
+ want = ['two'] # only remove the tag that was present
+ got = _intersect_items(existing_tags, new_tags)
+ self.assertEqual(want, got)
+
+ def test_get_changed_items(self):
+ """
+        All items from the left list that don't appear
+        in the right list.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [8, 9]
+ got = _get_changed_items(listA, listB)
+ self.assertEqual(want, got)
+
+ # simulate new tags added
+ tags_to_add = ['one', 'two']
+ existing_tags = ['two']
+ want = ['one']
+ got = _get_changed_items(tags_to_add, existing_tags)
+ self.assertEqual(want, got)
+
+ # simulate removing tags
+ # specifying one tag on right that doesn't exist
+ tags_to_remove = ['one', 'two']
+ existing_tags = ['two', 'three']
+ want = ['three']
+ got = _get_changed_items(existing_tags, tags_to_remove)
+ self.assertEqual(want, got)
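+
+# Taken together, a sketch of how gce_tag is expected to combine these
+# helpers (an illustration, not part of the module under test):
+#   tags actually added:   _get_changed_items(tags_to_add, existing_tags)
+#   tags actually removed: _intersect_items(existing_tags, tags_to_remove)
+#   resulting tag set:     _union_items(existing_tags, tags_to_add)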
diff --git a/collections-debian-merged/ansible_collections/community/google/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/community/google/tests/unit/requirements.txt
new file mode 100644
index 00000000..16494a44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/google/tests/unit/requirements.txt
@@ -0,0 +1 @@
+unittest2 ; python_version < '2.7'
\ No newline at end of file