Diffstat (limited to 'ansible_collections/cloudscale_ch/cloud')
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml  35
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml  33
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml  118
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml  36
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/.gitignore  4
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst  161
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md  6
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/COPYING  621
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/FILES.json  1370
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/MANIFEST.json  36
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/README.md  129
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore  1
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml  148
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml  29
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep  0
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/codecov.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/meta/runtime.yml  57
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py  0
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py  37
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py  0
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py  216
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py  379
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py  0
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py  468
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py  285
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer.py  240
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_health_monitor.py  398
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_listener.py  271
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool.py  217
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool_member.py  319
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py  197
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py  161
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py  737
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py  171
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py  322
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py  265
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases  1
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml  20
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml  24
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml  19
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_load_balancers.yml  24
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml  17
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml  17
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml  17
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml  24
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml  17
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml  6
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml  7
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml  415
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml  158
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml  38
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml  27
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/defaults/main.yml  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/failures.yml  67
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/main.yml  13
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/setup.yml  19
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/tests.yml  188
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/defaults/main.yml  9
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/failures.yml  60
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/main.yml  9
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/setup.yml  35
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/tests.yml  412
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/defaults/main.yml  12
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/failures.yml  69
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/main.yml  9
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/setup.yml  23
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/tests.yml  248
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/defaults/main.yml  4
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/failures.yml  51
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/main.yml  12
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/setup.yml  11
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/tests.yml  160
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/defaults/main.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/failures.yml  34
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/main.yml  12
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/setup.yml  43
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/tests.yml  306
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml  44
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml  159
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml  44
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml  151
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml  53
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml  14
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml  991
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml  45
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml  8
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml  180
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml  82
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml  10
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml  13
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml  243
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases  2
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml  3
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml  5
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml  31
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml  38
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml  13
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml  10
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml  260
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/requirements.txt  1
-rw-r--r--  ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt  0
132 files changed, 12615 insertions, 0 deletions
diff --git a/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml b/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml
new file mode 100644
index 000000000..607e7e1a2
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml
@@ -0,0 +1,8 @@
+# Set update schedule for GitHub Actions
+---
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml
new file mode 100644
index 000000000..f2333cff4
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml
@@ -0,0 +1,35 @@
+name: Scheduled Cleanup
+
+on:
+ schedule:
+ - cron: '0 2 * * *' # UTC
+
+jobs:
+ cleanup:
+ name: Cleanup possible leftovers
+ runs-on: ubuntu-latest
+ steps:
+ - name: Ensure no other integration test is currently running
+ uses: softprops/turnstyle@v1
+ timeout-minutes: 60
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ same-branch-only: false
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.8'
+
+ - name: Run cleanup
+ run: |
+ pip install cloudscale-cli
+ for resource in objects-user server floating-ip server-group network;
+ do
+ echo "cleaning up $resource...";
+ cloudscale $resource list --delete --force;
+ echo "...done";
+ done
+ env:
+ CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }}
diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml
new file mode 100644
index 000000000..bf7e5e7b6
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml
@@ -0,0 +1,33 @@
+name: Publish release on Ansible Galaxy
+
+on:
+ release:
+ types: [published]
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ansible_collections/cloudscale_ch/cloud
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ path: ansible_collections/cloudscale_ch/cloud
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install ansible
+
+ - name: Build and publish
+ env:
+ ANSIBLE_GALAXY_API_KEY: ${{ secrets.ANSIBLE_GALAXY_API_KEY }}
+ run: |
+ ansible-galaxy collection build .
+ ansible-galaxy collection publish *.tar.gz --api-key $ANSIBLE_GALAXY_API_KEY
diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml
new file mode 100644
index 000000000..11f63f3bc
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml
@@ -0,0 +1,118 @@
+name: Collection Integration tests
+
+on:
+ schedule:
+ - cron: "0 5 * * *" # UTC
+ push:
+ tags: "v*"
+ branches:
+ - "test/**"
+jobs:
+ integration-test:
+ name: >-
+ Integration test on ansible-core ${{ matrix.ansible }}
+ using Python ${{ matrix.python }}
+ runs-on: ubuntu-22.04
+ defaults:
+ run:
+ working-directory: ansible_collections/cloudscale_ch/cloud
+ strategy:
+ max-parallel: 1
+ fail-fast: false
+ matrix:
+ ansible:
+ - 2.13
+ - 2.14
+ - 2.15
+ include:
+ - ansible: 2.13
+ python: 3.8
+ - ansible: 2.14
+ python: 3.9
+ - ansible: 2.15
+ python: 3.9
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ path: ansible_collections/cloudscale_ch/cloud
+
+ - name: Set up Python ${{ matrix.python }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Install ansible and collection dependencies
+ run: |
+ python -m pip install --upgrade pip
+ # Install the latest ansible-core patchlevel version
+ pip install 'ansible-core~=${{ matrix.ansible }}.0'
+ pip install -r tests/requirements.txt
+
+ - name: Build and install collection
+ run: |
+ ansible-galaxy collection build .
+ ansible-galaxy collection install *.gz
+
+ - name: Add config file
+ env:
+ CONFIG_FILE: ${{ secrets.CONFIG_FILE }}
+ INI_FILE: tests/integration/cloud-config-cloudscale.ini
+ run: |
+ echo -n "$CONFIG_FILE" > $INI_FILE && [ -s $INI_FILE ] || (>&2 echo no secrets provided; exit 1)
+
+ - name: Ensure no other integration test is currently running
+ uses: softprops/turnstyle@v1
+ timeout-minutes: 60
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ same-branch-only: false
+
+ - name: Run the tests
+ run: >-
+ ansible-test
+ integration
+ --docker
+ --python ${{ matrix.python }}
+ -v
+ --diff
+ --color
+ --allow-unsupported
+ --continue-on-error
+ --coverage
+ cloud/cloudscale/
+
+ - name: Generate coverage report.
+ run: >-
+ ansible-test
+ coverage xml
+ -v
+ --requirements
+ --group-by command
+ --group-by version
+
+ - uses: codecov/codecov-action@v3
+ with:
+ fail_ci_if_error: false
+
+ - name: Send mail in case of failure
+ id: send_mail
+ if: ${{ failure() && github.event_name == 'schedule' }}
+ shell: python3 {0}
+ run: |
+ from smtplib import SMTP
+ from email.message import EmailMessage
+
+ email = EmailMessage()
+ email['TO'] = '${{ secrets.CRON_RCPT }}'
+ email['FROM'] = 'noreply@github.com'
+ email['Subject'] = 'Ansible Cloud Module Integration Test Failure'
+ email.set_content("""
+ Integration tests using ansible-core ${{ matrix.ansible }} on Python ${{ matrix.python }} failed:
+ https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ """)
+
+ with SMTP('${{ secrets.MAILSERVER }}') as smtp:
+ smtp.starttls()
+ smtp.send_message(email)
diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml
new file mode 100644
index 000000000..719d83f47
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml
@@ -0,0 +1,36 @@
+name: Sanity tests
+on:
+ schedule:
+ - cron: "32 5 * * *"
+ pull_request:
+
+jobs:
+ sanity:
+ name: Sanity tests (${{ matrix.ansible }})
+ defaults:
+ run:
+ working-directory: ansible_collections/cloudscale_ch/cloud
+ strategy:
+ matrix:
+ ansible:
+ - stable-2.13
+ - stable-2.14
+ - stable-2.15
+ - devel
+ runs-on: ubuntu-22.04
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ path: ansible_collections/cloudscale_ch/cloud
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - name: Install ansible-base (${{ matrix.ansible }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ - name: Run sanity tests
+ run: ansible-test sanity --docker -v --color
diff --git a/ansible_collections/cloudscale_ch/cloud/.gitignore b/ansible_collections/cloudscale_ch/cloud/.gitignore
new file mode 100644
index 000000000..dadcc76ef
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/.gitignore
@@ -0,0 +1,4 @@
+*.pyc
+*.tar.gz
+tests/integration/cloud-config-cloudscale.ini
+tests/output/
diff --git a/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst b/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst
new file mode 100644
index 000000000..02dd771c8
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst
@@ -0,0 +1,161 @@
+==============================================
+Ansible Collection cloudscale.ch Release Notes
+==============================================
+
+.. contents:: Topics
+
+
+v2.3.1
+======
+
+Bugfixes
+--------
+
+- Add missing modules to the "cloudscale_ch.cloud.cloudscale" action group.
+- Remove outdated Ansible version requirement from the README.
+
+v2.3.0
+======
+
+Major Changes
+-------------
+
+- Bump minimum required Ansible version to 2.13.0
+
+New Modules
+-----------
+
+- load_balancer - Manages load balancers on the cloudscale.ch IaaS service
+- load_balancer_health_monitor - Manages load balancer health monitors on the cloudscale.ch IaaS service
+- load_balancer_listener - Manages load balancer listeners on the cloudscale.ch IaaS service
+- load_balancer_pool - Manages load balancer pools on the cloudscale.ch IaaS service
+- load_balancer_pool_member - Manages load balancer pool members on the cloudscale.ch IaaS service
+
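A minimal sketch of creating one of the new load balancer resources with the companion modules left to manage listeners, pools and members; the ``zone`` value and the choice of parameters shown are illustrative assumptions, not taken from this changelog::

    - name: Create a load balancer (sketch)
      cloudscale_ch.cloud.load_balancer:
        name: web-lb
        zone: lpg1                   # assumed zone slug
        api_token: '{{ api_token }}'
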
+v2.2.4
+======
+
+Minor Changes
+-------------
+
+- Add UEFI firmware type option for custom images.
+
+v2.2.3
+======
+
+Minor Changes
+-------------
+
+- Fixed a typo in region code.
+- Fixed various documentation typos.
+- Streamlined the flavors to the new format ``flex-y-x`` across the related modules and tests.
+
+v2.2.2
+======
+
+Minor Changes
+-------------
+
+- Fixed inventory documentation.
+
+v2.2.1
+======
+
+Minor Changes
+-------------
+
+- Updated documentation: ``ssh_keys`` is a YAML list, not a string.
+
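As a sketch of the corrected usage, only the parameters relevant to the note are shown; the key values are placeholders::

    - name: Server task with ssh_keys given as a YAML list (sketch)
      cloudscale_ch.cloud.server:
        name: web1
        ssh_keys:                    # a list of key strings, not a single string
          - ssh-ed25519 AAAA...key1
          - ssh-rsa AAAA...key2
        api_token: '{{ api_token }}'
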
+v2.2.0
+======
+
+Major Changes
+-------------
+
+- Add custom_image module
+
+Minor Changes
+-------------
+
+- Increase api_timeout to 45
+- Read CLOUDSCALE_API_TIMEOUT environment variable
+
+New Modules
+-----------
+
+- custom_image - Manage custom images on the cloudscale.ch IaaS service
+
+v2.1.0
+======
+
+Minor Changes
+-------------
+
+- Add interface parameter to server module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54).
+- Rename server_uuids parameter to servers in volume module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54).
+
+Deprecated Features
+-------------------
+
+- The aliases ``server_uuids`` and ``server_uuid`` of the servers parameter in the volume module will be removed in version 3.0.0.
+
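A sketch of the renamed parameter in use; the volume size and the server UUID reference are placeholders::

    - name: Attach a volume using the servers parameter (sketch)
      cloudscale_ch.cloud.volume:
        name: data-volume
        size_gb: 50
        servers:                     # replaces the deprecated server_uuids/server_uuid aliases
          - '{{ web1.uuid }}'
        api_token: '{{ api_token }}'
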
+v2.0.0
+======
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- floating_ip - ``name`` is required for assigning a new floating IP.
+
+v1.3.1
+======
+
+Minor Changes
+-------------
+
+- Implemented support for identically named resources of the same type per zone (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/46).
+
+Bugfixes
+--------
+
+- Fix inventory plugin failing to launch (https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues/49).
+
+v1.3.0
+======
+
+Minor Changes
+-------------
+
+- floating_ip - Added an optional name parameter to make the module idempotent. The parameter will be required for assigning a new floating IP as of version 2.0.0 (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/43/).
+- floating_ip - Allow reserving an IP without assigning it to a server (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/31/).
+
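For example, a floating IP can now be reserved idempotently by name without assigning it to a server; the ``ip_version`` value is illustrative::

    - name: Reserve a floating IP without assigning it (sketch)
      cloudscale_ch.cloud.floating_ip:
        name: incoming-v4            # the name keeps repeated runs idempotent
        ip_version: 4
        api_token: '{{ api_token }}'
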
+New Modules
+-----------
+
+- subnet - Manages subnets on the cloudscale.ch IaaS service
+
+v1.2.0
+======
+
+Minor Changes
+-------------
+
+- server_group - The module has been refactored and the code simplified (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/23).
+- volume - The module has been refactored and the code simplified (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/24).
+
+New Modules
+-----------
+
+- network - Manages networks on the cloudscale.ch IaaS service
+
+v1.1.0
+======
+
+Minor Changes
+-------------
+
+- floating_ip - added tags support (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/16)
+
+New Modules
+-----------
+
+- objects_user - Manages objects users on the cloudscale.ch IaaS service
diff --git a/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md b/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md
new file mode 100644
index 000000000..1b2b6da2a
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md
@@ -0,0 +1,6 @@
+# Contributing
+
+Any contribution is welcome and we only ask contributors to:
+
+- Create an issue for any significant contribution that would change a large portion of the code base
+- Provide at least integration tests for any contribution
diff --git a/ansible_collections/cloudscale_ch/cloud/COPYING b/ansible_collections/cloudscale_ch/cloud/COPYING
new file mode 100644
index 000000000..94a045322
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/COPYING
@@ -0,0 +1,621 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
diff --git a/ansible_collections/cloudscale_ch/cloud/FILES.json b/ansible_collections/cloudscale_ch/cloud/FILES.json
new file mode 100644
index 000000000..9552af178
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/FILES.json
@@ -0,0 +1,1370 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df5a34d1733725aefa8f5caff80de8cf197cf21396479aad5bd70d35ea7744d9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "281f8e67542260c31e3da2659c50236ce74c2c727487179bc751fb568c49e56e",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c6ca5e5be6ca15c79a2c8c0e33852bea83f5392991dc1d34b29602dd81dde9e",
+ "format": 1
+ },
+ {
+ "name": "codecov.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af6b28485318d71eee003780eb62183364d82f1a50917cdcaaa4ffb88a178d51",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/load_balancer_pool_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0e5698a59843f407b554fe4bb488950cb8ca3c04df93f143a80d7ee7edcf9f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/load_balancer_health_monitor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "600d1229aa72078d4154ebbf8d843707b1aa5c5f4cedcc5257958751821d1253",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/load_balancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21eedb771a1b3e4f58d2e9a333e286f906eefc4c0e8e5bcb0c05e7ccb4b352cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/objects_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c756f640a0b808db7a5892b40e4a9cb4770909470cb5040ebeccc27a28bcdb17",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/floating_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eba753b038fe3fc425a97f312ae64c9c4031f61123977e5512c07820a842a199",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b2cb880ae29727265abf4a3908ed1fd4b70f172e3a04ee94940de3e6ab9d800",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/custom_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f91a946d65708caeb1d2192e64fe2037c1515f25c08882c03750260b3fc6d87b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/server_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8942f6d76b4f4957eb0e60b263184e33e09b88270770637cc1527029c44c3f7e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "735f7cb448f3f8bd5788d0b9db3bfe71b9f6351fb0b3ef1295b84adc15f70856",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/load_balancer_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ef1d57100ed3d7eabf13e77a1e95b7d7fca8a0b52e36f160e3634e34b18d9bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f9891ead798ef042bdb042f28c157d2717bae2af800f5cdeaa180f9bc049217",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/load_balancer_listener.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d2c2f0e414ed00d4179bb9ac0cfe5a4b03371fd3bd9bd66c49ab2322ae2a112",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee0b344c580fbb0816486bff7a061cee8e110cbf5dae22f22e4a9a092ebd0eee",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e44f742653062165562bfdded791e6047a267b05ea8fbba2f58ae9305effc5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71fb8dd2988c23f2e66370270ff8bfab7478100adb4cc64da81df2baf1886749",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/api_parameters.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0e3046cf350435d812fd0ba4f40afa020bc4229dfc973461b524d0e06591072",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4da3851108cf118d70fb2d8d5bdd4052043ab824fd34d83932212ff433c44731",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c61f12da7cdad526bdcbed47a4c0a603e60dbbfdaf8b66933cd088e9132c303f",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6c817a7f8c0522150e2f8ffd9af76a78fc35026b9e0705bde9c4c64523c55e5",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfc6c77534214c87a0a023d21b6dff41891f3ab439dc934484adfde7d1def736",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3ce1eba741aaee12d2aa6fa734e0584f58388f4988413d2d976513180ac1c72",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39075af57f8eba3ce7f50067dc89259ab4e5391411ebccde06383f54307f8800",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5c43334636f75ec79693cbbdb680061d3a426ae3944d5d84ccb9f93f43886b6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16c91b0f2138feb7cbd31d8999e7a637245697d1cd9ad385323335aa0ce64ae9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0aad717d5a396cf4e0874c1d06c0e6cbe616ad2edd8cb90fff6ab1bca3f690a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f6678ae770bce9975f94066fe5cb8e301a34f675b3184fc23f4d63a771e020a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c62b70d3736e04c69afc2df1d9d09360a158bb27f9a13fc58f0cf0d596eb624c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_health_monitor/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "779498436712af0a3815fff15dc1e1d198d8476aa82c6748fbb13c3de50e26b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edf78a775dee58f5c9e4a2b6846658270cead8672876258c41106e2716972820",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0aad717d5a396cf4e0874c1d06c0e6cbe616ad2edd8cb90fff6ab1bca3f690a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c216ece1b3b5c9bbb6078b5f2e73f6d81899e6ee36f3ae65f51b09829cf3f2ae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f80e6521679c7eef368a88ba9a595bbcbd9d949a27a16639652b6bf2bf17963",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_listener/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74ac8a67d4c5ffb9a10b7ede68334ccdb79aee420296df9503087fb754209c85",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ceffe3b799c94704c35c2a5ca338771704310ec857a3873ce735fa9ee03a9de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b724df2f209815c7df7dcf07c2a9aa0fd9d79436d2746544b7cc297f2bf344c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efde8b87588119edf1bc3cc1f88262921e2921b32e7d014948c31953bc244bdc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f33c63221a39d6601b60c9aa3f03fc9e456b81cf23a58cde80d9457f555f2b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77537c4e65800d253b112af3394bfc22d6cc0fa983805e6b2ef4b116fcb71292",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b0bd4339cbd6fa569d5978bc1fdd9c17e48caad61df972eb6302efc90f14059",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4502dfc21bdfdb9745464d0fbe758d44f147be0211b3badf1da18be61f5d4c35",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91c97da6315f99fd38e2929a8408e67de37be5a47cde811add50dfb410634b2c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4af001ebfd47d2c63a9b159900ab4a4ca9d5240c8180156c1afdaf743d087f25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool_member/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11e29d0cd37bfd003008f9ce8bcad2b9cfb5a2cac9ce5f8df084d4ff19ebdfd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/tasks/floating_ip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "402b982a02df802093aee7979fcefc37506a388805b0c791e866737726e165db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31705d25f3bf944957153d3c930086f5babcf873958adc76da981ecaf5bfb579",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/floating_ip/tasks/unassigned.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd49224daf8665d9980a0041ac445c0402da2718fee85842cd7bf43369c9cfc0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c6f0d3942e38ede79365f096d78892a420172d300f5fea0f79f8f36767d06d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eddcaa01aabe22df954c1c352a81f8456de8476be5e8aa3ec229e4a0698b19ec",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60128cb6423e9f2b21ba9e05cc670325b99d3b6fbbb807d3d4474e100c7c377a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d52e7e9120b1accdd689734b41e8bc34ecd81f74775c3a800f35d36c07ca63d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/subnet/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d8107e1bdb7deab792785788e8745b42588f71783cafcc2ce1a32d35c1c27ef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3007eefc5cb739910601691a7f66c4f9c571f1e32b033346563fd5499cd83e8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2626113d52c4f1113b41e75af461ad16f779271aee0b01c7600f4f3b8c753a50",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da1f31d57f5785e0866673baae9e2f1e1672efd5c423c5da541176cb7953c116",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f778d0044cd830b629812b1ca8815b6eeee99f562741af269f6de00e6469980",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "508756b3b7948faef7ec9aa96a8449da255578b6b657523c442180879b49489c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/volume/tasks/deprecation_warning.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "639841041872b51e9e1da5815bf4b61de540b48e9a6920436f5d5a315fb0a4a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6766719eabd380f4239027c1da1663c6bca162716dcba27b82aa015c126765cc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d2d9b8da573fa0339395327d86dc979404d749f3e5c3e15110d46644dde4924",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/custom_image/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c349cbac5f1f476a19ca2a347f811fb5d74755358fb4e86249040e8aeba9f17b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be793a4af2cbb5f5398e0ba3fae822b4a6f76be183f69163f679cf6ca888ff3f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a8e9848f5767e67ce778eca1bdd5044403bccd43586143644d5a6e678f826fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/network/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da1e8e05fe4586478da692ee2f38b07b8691dafeea6d46f770f58b51260df7b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27d4b996ea78fddc8152d8dc5f2a87461ec8972baecb9a494b384c0f163b9bc3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24758d9aae6a825817c2bdfe3314a557264ef5c640ae784b4e26fd07d2e677dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/server_group/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f41b4068697edfbb80ffa3e1e171b82e016a72b17efc299c77283fe88f774124",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46e001acebb586b961302c5e8876eab0fe2a2c136b7f026da04014df64b30acb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f522a4d67cfc9d074d2e2e9f1a6d0aa274e87712fddb00905ea9262f39f472f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/objects_user/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e3142fac71c2169fa650af4216cf5d69eef1d80bfe020fbe74549d997d8e1c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a4dbdc933b7aae8ce100f8261e6789fd180a01249a3d9235e3669c0c3f5c872",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4502dfc21bdfdb9745464d0fbe758d44f147be0211b3badf1da18be61f5d4c35",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/tasks/failures.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9138215331b041df0b0ea4c29e2456f166392a3eae3073595a24bfc41de2248c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac032908719ff893890d9c6dea0ad9db58d60dd35ef4dfbb83447e5e93279f3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/load_balancer_pool/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afdefce1aa8a8d01a375c36229e86144ed637e7992ee57b821c5a6ad38662d52",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63dd078899b099fdd988defcad94a7514f83827057caefc2b4cb9dd31188ce32",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f4342705d85638595cc3dd584302e9c860bd83d383653c3d9fe283ee98fcf19",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_load_balancers.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5eb9e0e728c219877d5bf3f8f08cd0252d073996a6cdf3c89349e548b1515ba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_volumes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb224f8cf56d2a24e81c14bf3b8974577d1d9aab78cb8c100fdc552055113c9c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_networks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76183d1bd899652a5c8deea41b0f27ad64cd268ce3e7a8a69314c7193c677be8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_objects_users.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52891380666cf92aeb5d8797cc3e5b0567772e2641714a775a05ccb2279d2a98",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_floating_ips.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bab4ca163d566a13f9abacdf179f59fc186ca160c777937fc7eb76616993c7f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_servers.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58ea8194da127df1082eb88523f6b713d3d3e323a62279ed52a1e0e96efc6bf7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_custom_images.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9def0b75c5ff882cfd15504cbcaea755124887b5cfbc3670551c48e8f2f8524",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/common/tasks/cleanup_server_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d92489602b40f14f4f3156a04b2a39841f01a3296b837365fd9887e25b84e3b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/cloud-config-cloudscale.ini.template",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b442f74e11fdcdc779e16b3bf0a9f359b04b13b7d5c17175ff12d4b93eeb0b63",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a5a2e4665194900a0d24968456298eaac531c0b10a3fac4f10be5c58047c0e1",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5283f7d2053ff9e8b243ede392044b98cf8cc4ec5098fd4349c185de457a9008",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cb222f83601eae3cd163a63ac9de9b261505e2210197d86febd0a21db366f5b",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/dependabot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d207e80d10726360f2046d4b2473a3cfd9f9eca99590281fa39d88f78e745145",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/test-sanity.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75de31af0dafcd1c99a7896583e754331bce883fd48aa28dcb99fe61259f5c05",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/test-integration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e03e8405c84e3b986e5b7b7a4e3b2a92247d97c31cce52e572b96e38bb32bf4c",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d176e1f384ac6ca05fc5b29640bc9b0866410121e9efff2a26c42cdc491e5c49",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/publish-ansible-galaxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7900c906da270b42918a7684f37093356fa49989061b113d956ccd5d9a1e79c",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/cloudscale_ch/cloud/MANIFEST.json b/ansible_collections/cloudscale_ch/cloud/MANIFEST.json
new file mode 100644
index 000000000..59887a4bf
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/MANIFEST.json
@@ -0,0 +1,36 @@
+{
+ "collection_info": {
+ "namespace": "cloudscale_ch",
+ "name": "cloud",
+ "version": "2.3.1",
+ "authors": [
+ "Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>",
+ "Denis Krienb\u00fchl <denis.krienbuehl@cloudscale.ch>",
+ "Ren\u00e9 Moser <mail@renemoser.net>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "cloud",
+ "cloudscale",
+ "cloudscale_ch"
+ ],
+ "description": "Ansible Collection for cloudscale.ch",
+ "license": [
+ "GPL-3.0-or-later"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/cloudscale-ch/ansible-collection-cloudscale",
+ "documentation": "",
+ "homepage": "https://github.com/cloudscale-ch/ansible-collection-cloudscale",
+ "issues": "https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2375ee2c1c581068f5179252600a956adee07e7bfe05125bb79400fa2b7b216a",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/cloudscale_ch/cloud/README.md b/ansible_collections/cloudscale_ch/cloud/README.md
new file mode 100644
index 000000000..0565c55ed
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/README.md
@@ -0,0 +1,129 @@
+
+![Collection Integration tests](https://github.com/cloudscale-ch/ansible-collection-cloudscale/workflows/Collection%20Integration%20tests/badge.svg)
+[![Codecov](https://img.shields.io/codecov/c/github/cloudscale-ch/ansible-collection-cloudscale)](https://codecov.io/gh/cloudscale-ch/ansible-collection-cloudscale)
+[![License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](LICENSE)
+
+# Ansible Collection for cloudscale.ch Cloud
+
+This collection provides a series of Ansible modules and plugins for interacting with the [cloudscale.ch](https://www.cloudscale.ch) Cloud.
+
+## Installation
+
+To install the collection hosted in Galaxy:
+
+```bash
+ansible-galaxy collection install cloudscale_ch.cloud
+```
+
+To upgrade to the latest version of the collection:
+
+```bash
+ansible-galaxy collection install cloudscale_ch.cloud --force
+```
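+
+To pin the collection to a known version, for example in CI, it can also be
+listed in a `requirements.yml` file and installed with
+`ansible-galaxy collection install -r requirements.yml`. The file name and
+version constraint below are just an example:
+
+```yaml
+# requirements.yml
+collections:
+  - name: cloudscale_ch.cloud
+    version: ">=2.3.0"
+```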
+
+## Usage
+
+### Playbooks
+
+To use a module from the cloudscale.ch collection, please reference the full namespace, collection name, and module name that you want to use:
+
+```yaml
+---
+- name: Using cloudscale.ch collection
+ hosts: localhost
+ tasks:
+ - cloudscale_ch.cloud.server:
+ name: web1
+ image: debian-10
+ flavor: flex-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ server_groups: web-group
+ zone: lpg1
+ api_token: ...
+```
+
+Or you can add the full namespace and collection name in the `collections` element:
+
+```yaml
+---
+- name: Using cloudscale.ch collection
+ hosts: localhost
+ collections:
+ - cloudscale_ch.cloud
+ tasks:
+ - server:
+ name: web1
+ image: debian-10
+ flavor: flex-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ server_groups: web-group
+ zone: lpg1
+ api_token: ...
+```
+
+### Roles
+
+For existing Ansible roles, please also reference the full namespace, collection name, and module name in tasks, instead of just the module name.
+
+### Plugins
+
+To use a plugin, please reference the full namespace, collection name, and plugin name that you want to use:
+
+```yaml
+plugin: cloudscale_ch.cloud.<myplugin>
+```
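+
+For example, the inventory plugin shipped with this collection reads its
+configuration from a file whose name ends in `cloudscale.yml` or
+`cloudscale.yaml`. A minimal configuration (file name and grouping are
+illustrative only) could look like this:
+
+```yaml
+# inventory_cloudscale.yml
+plugin: cloudscale_ch.cloud.inventory
+inventory_hostname: name
+ansible_host: public_v4
+keyed_groups:
+  # Group servers by the value of their "project" tag
+  - prefix: project
+    key: cloudscale.tags.project
+```
+
+The API token is taken from the `api_token` option or the
+`CLOUDSCALE_API_TOKEN` environment variable, and the result can be inspected
+with `ansible-inventory --list -i inventory_cloudscale.yml`.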
+
+## Contributing
+
+There are many ways in which you can participate in the project, for example:
+
+- Submit bugs and feature requests, and help us verify them as they are checked in
+- Review source code changes
+- Review the documentation and make pull requests for anything from typos to new content
+- If you are interested in fixing issues and contributing directly to the code base, please see the [CONTRIBUTING](CONTRIBUTING.md) document.
+
+## Releasing
+
+### Prepare a new release
+
+The changelog is managed using the `antsibull` tool. You can install
+it with `pip install antsibull`.
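+
+Changes are collected as fragment files in `changelogs/fragments/` before a
+release. A minimal fragment (the file name and change text below are only
+placeholders) looks like this:
+
+```yaml
+# changelogs/fragments/my-change.yml
+minor_changes:
+  - server - Short description of the change, ideally with a link to the pull request.
+```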
+
+1. Update version in galaxy.yml
+2. Update changelog using antsibull
+```
+antsibull-changelog release
+```
+3. Commit changelog and new version
+```
+git commit -m "Release version X.Y.Z" galaxy.yml CHANGELOG.rst changelogs/
+```
+4. Tag the release. Preferably create a GPG-signed tag if you have a GPG
+key. Version tags should be prefixed with "v" (otherwise the
+integration tests won't run automatically).
+```
+git tag -s -m "Version X.Y.Z" vX.Y.Z
+```
+5. Push the release and tag
+```
+git push origin master vX.Y.Z
+```
+
+### Release to Ansible Galaxy
+
+After the release is tagged and pushed to GitHub, a release to Ansible
+Galaxy can be created using the release feature on GitHub:
+
+1. **Wait for integration tests to succeed. They should automatically
+run on new tags.** Only release if they succeed. Otherwise delete the
+tag and fix the issue.
+2. Create a release on GitHub by going to the release overview and
+ selecting "Draft a new release".
+
+## License
+
+GNU General Public License v3.0
+
+See [COPYING](COPYING) to see the full text.
diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore b/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore
new file mode 100644
index 000000000..6be6b5331
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore
@@ -0,0 +1 @@
+/.plugin-cache.yaml
diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml b/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml
new file mode 100644
index 000000000..629ae03da
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml
@@ -0,0 +1,148 @@
+ancestor: null
+releases:
+ 1.1.0:
+ changes:
+ minor_changes:
+ - floating_ip - added tags support (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/16)
+ fragments:
+ - 16-floating_ip_tags.yml
+ modules:
+ - description: Manages objects users on the cloudscale.ch IaaS service
+ name: objects_user
+ namespace: ''
+ release_date: '2020-08-18'
+ 1.2.0:
+ changes:
+ minor_changes:
+      - server_group - The module has been refactored and the code simplified (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/23).
+      - volume - The module has been refactored and the code simplified (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/24).
+ fragments:
+ - consolidate-modules.yml
+ modules:
+ - description: Manages networks on the cloudscale.ch IaaS service
+ name: network
+ namespace: ''
+ release_date: '2020-10-13'
+ 1.3.0:
+ changes:
+ minor_changes:
+ - floating_ip - Added an optional name parameter to gain idempotency. The parameter
+ will be required for assigning a new floating IP with release of version 2.0.0
+ (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/43/).
+ - floating_ip - Allow to reserve an IP without assignment to a server (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/31/).
+ fragments:
+ - floating-idempotency.yml
+ - floating-ip_optional_server.yml
+ modules:
+ - description: Manages subnets on the cloudscale.ch IaaS service
+ name: subnet
+ namespace: ''
+ release_date: '2020-11-23'
+ 1.3.1:
+ changes:
+ bugfixes:
+ - Fix inventory plugin failing to launch (https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues/49).
+ minor_changes:
+ - Implemented identical naming support of the same resource type per zone (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/46).
+ fragments:
+ - allow-similar-name-per-zone.yml
+ - fix-inventory-plugin-error.yml
+ release_date: '2021-01-26'
+ 2.0.0:
+ changes:
+ breaking_changes:
+ - floating_ip - ``name`` is required for assigning a new floating IP.
+ fragments:
+ - floating-ip-require-name.yml
+ release_date: '2021-02-02'
+ 2.1.0:
+ changes:
+ deprecated_features:
+ - The aliases ``server_uuids`` and ``server_uuid`` of the servers parameter
+ in the volume module will be removed in version 3.0.0.
+ minor_changes:
+ - Add interface parameter to server module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54).
+ - Rename server_uuids parameter to servers in volume module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54).
+ fragments:
+ - add-interface-parameter-to-server-module.yml
+ - deprecate-server_uuids-volume-module.yml
+ - rename-server_uuids-parameter-to-servers.yml
+ release_date: '2021-02-04'
+ 2.2.0:
+ changes:
+ major_changes:
+ - Add custom_image module
+ minor_changes:
+ - Increase api_timeout to 45
+ - Read CLOUDSCALE_API_TIMEOUT environment variable
+ fragments:
+ - add-custom-image-module.yml
+ - increase-api-timeout-and-use-env.yml
+ modules:
+ - description: Manage custom images on the cloudscale.ch IaaS service
+ name: custom_image
+ namespace: ''
+ release_date: '2021-05-25'
+ 2.2.1:
+ changes:
+ minor_changes:
+ - 'Updated documentation: ``ssh_keys`` is a YAML list, not a string.'
+ release_date: '2022-03-21'
+ 2.2.2:
+ changes:
+ minor_changes:
+ - Fixed inventory documentation.
+ fragments:
+ - fix-sanity.yml
+ release_date: '2022-05-21'
+ 2.2.3:
+ changes:
+ minor_changes:
+ - Fixed a typo in region code.
+ - Fixed various documentation typos.
+ - Streamlined the flavors to the new format ``flex-y-x`` across the related
+ modules and tests.
+ fragments:
+ - 76-region-typos.yml
+ - 78-various-typos.yml
+ - 79-flavor-format.yml
+ release_date: '2022-11-16'
+ 2.2.4:
+ changes:
+ minor_changes:
+ - Add UEFI firmware type option for custom images.
+ fragments:
+ - add-uefi-option.yml
+ release_date: '2023-01-04'
+ 2.3.0:
+ changes:
+ major_changes:
+ - Bump minimum required Ansible version to 2.13.0
+ fragments:
+ - ansible_version_bump.yml
+ modules:
+ - description: Manages load balancers on the cloudscale.ch IaaS service
+ name: load_balancer
+ namespace: ''
+    - description: Manages load balancer health monitors on the cloudscale.ch IaaS service
+ name: load_balancer_health_monitor
+ namespace: ''
+ - description: Manages load balancer listeners on the cloudscale.ch IaaS service
+ name: load_balancer_listener
+ namespace: ''
+ - description: Manages load balancer pools on the cloudscale.ch IaaS service
+ name: load_balancer_pool
+ namespace: ''
+ - description: Manages load balancer pool members on the cloudscale.ch IaaS service
+ name: load_balancer_pool_member
+ namespace: ''
+ release_date: '2023-06-05'
+ 2.3.1:
+ changes:
+ bugfixes:
+ - Add missing modules to the "cloudscale_ch.cloud.cloudscale" action group.
+ - Remove outdated Ansible version requirement from the README.
+ fragments:
+ - add-missing-modules-to-action-group.yml
+ - fix-version-requirement-readme.yml
+ release_date: '2023-06-06'
diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml b/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml
new file mode 100644
index 000000000..151cdf32a
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Ansible Collection cloudscale.ch
+trivial_section_name: trivial
diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep b/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep
diff --git a/ansible_collections/cloudscale_ch/cloud/codecov.yml b/ansible_collections/cloudscale_ch/cloud/codecov.yml
new file mode 100644
index 000000000..47fe4ef24
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/codecov.yml
@@ -0,0 +1,8 @@
+---
+coverage:
+ precision: 2
+ round: down
+ range: "70...100"
+
+fixes:
+ - "/ansible_collections/cloudscale_ch/cloud/::"
diff --git a/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml b/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml
new file mode 100644
index 000000000..12882c1d8
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml
@@ -0,0 +1,57 @@
+requires_ansible: '>=2.13.0'
+action_groups:
+ cloudscale:
+ - custom_image
+ - floating_ip
+ - load_balancer
+ - load_balancer_health_monitor
+  - load_balancer_listener
+ - load_balancer_pool
+ - load_balancer_pool_member
+ - network
+ - objects_user
+ - server_group
+ - server
+ - subnet
+ - volume
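+  # The action group above can be targeted with module_defaults in a playbook,
+  # for example (values are illustrative):
+  #
+  #   module_defaults:
+  #     group/cloudscale_ch.cloud.cloudscale:
+  #       api_token: "{{ cloudscale_api_token }}"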
+
+plugin_routing:
+ inventory:
+ cloudscale:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: Deprecated, use cloudscale_ch.cloud.inventory
+ redirect: cloudscale_ch.cloud.inventory
+ doc_fragments:
+ cloudscale:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: Deprecated, use cloudscale_ch.cloud.api_parameters
+ redirect: cloudscale_ch.cloud.api_parameters
+ module_utils:
+ cloudscale:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: Deprecated, use cloudscale_ch.cloud.api
+ redirect: cloudscale_ch.cloud.api
+ modules:
+ cloudscale_floating_ip:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.floating_ip
+ redirect: cloudscale_ch.cloud.floating_ip
+ cloudscale_server:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.server
+ redirect: cloudscale_ch.cloud.server
+ cloudscale_server_group:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.server_group
+ redirect: cloudscale_ch.cloud.server_group
+ cloudscale_volume:
+ deprecation:
+ removal_date: 2021-12-12
+ warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.volume
+ redirect: cloudscale_ch.cloud.volume
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py
new file mode 100644
index 000000000..9ed63eed0
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = '''
+options:
+ api_url:
+ description:
+ - cloudscale.ch API URL.
+ - This can also be passed in the C(CLOUDSCALE_API_URL) environment variable.
+ default: https://api.cloudscale.ch/v1
+ type: str
+ version_added: 1.3.0
+ api_token:
+ description:
+ - cloudscale.ch API token.
+ - This can also be passed in the C(CLOUDSCALE_API_TOKEN) environment variable.
+ required: true
+ type: str
+ api_timeout:
+ description:
+ - Timeout in seconds for calls to the cloudscale.ch API.
+ - This can also be passed in the C(CLOUDSCALE_API_TIMEOUT) environment variable.
+ default: 45
+ type: int
+notes:
+ - All operations are performed using the cloudscale.ch public API v1.
+ - "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)."
+ - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at
+ U(https://control.cloudscale.ch).
+'''
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py
new file mode 100644
index 000000000..b55b1900e
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+name: inventory
+author:
+ - Gaudenz Steinlin (@gaudenz)
+short_description: cloudscale.ch inventory source
+description:
+  - Get inventory hosts from the cloudscale.ch API.
+  - Uses a YAML configuration file ending with either I(cloudscale.yml) or I(cloudscale.yaml) to set parameter values (also see examples).
+extends_documentation_fragment:
+ - constructed
+options:
+ api_token:
+ description:
+ - cloudscale.ch API token.
+ - This can also be passed in the C(CLOUDSCALE_API_TOKEN) environment variable.
+ type: str
+ plugin:
+ description: |
+ Token that ensures this is a source file for the 'cloudscale'
+ plugin.
+ required: True
+ choices: ['cloudscale']
+ inventory_hostname:
+ description: |
+ What to register as the inventory hostname.
+ If set to 'uuid' the uuid of the server will be used and a
+ group will be created for the server name.
+ If set to 'name' the name of the server will be used unless
+      there is more than one server with the same name, in which
+ case the 'uuid' logic will be used.
+ type: str
+ choices:
+ - name
+ - uuid
+ default: "name"
+ ansible_host:
+ description: |
+ Which IP address to register as the ansible_host. If the
+ requested value does not exist or this is set to 'none', no
+ ansible_host will be set.
+ type: str
+ choices:
+ - public_v4
+ - public_v6
+ - private
+ - none
+ default: public_v4
+'''
+
+EXAMPLES = r'''
+# cloudscale.yml name ending file in YAML format
+# Example command line: ansible-inventory --list -i inventory_cloudscale.yml
+
+plugin: cloudscale_ch.cloud.inventory
+
+# Example grouping by tag key "project"
+plugin: cloudscale_ch.cloud.inventory
+keyed_groups:
+ - prefix: project
+ key: cloudscale.tags.project
+
+# Example grouping by key "operating_system" lowercased and prefixed with "os"
+plugin: cloudscale_ch.cloud.inventory
+keyed_groups:
+ - prefix: os
+ key: cloudscale.image.operating_system | lower
+'''
+import os
+
+from collections import defaultdict
+from json import loads
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.inventory.group import to_safe_group_name
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+iface_type_map = {
+ 'public_v4': ('public', 4),
+ 'public_v6': ('public', 6),
+ 'private': ('private', 4),
+ 'none': (None, None),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+
+ NAME = 'cloudscale'
+
+ @property
+ def api_url(self):
+ return os.environ.get(
+ 'CLOUDSCALE_API_URL', 'https://api.cloudscale.ch/v1')
+
+ @property
+ def api_token(self):
+ return self.get_option('api_token') \
+ or os.environ.get('CLOUDSCALE_API_TOKEN')
+
+ def _get_server_list(self):
+
+ # Get list of servers from cloudscale.ch API
+ response = open_url(
+ self.api_url + '/servers',
+ headers={'Authorization': 'Bearer %s' % self.api_token}
+ )
+ return loads(response.read())
+
+ def verify_file(self, path):
+ '''
+ :param path: the path to the inventory config file
+        :return: True if this is a valid cloudscale inventory config file name
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('cloudscale.yml', 'cloudscale.yaml')):
+ return True
+ self.display.debug(
+ "cloudscale inventory filename must end with 'cloudscale.yml' or 'cloudscale.yaml'"
+ )
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+
+ if not self.api_token:
+ raise AnsibleError('Could not find an API token. Set the '
+ 'CLOUDSCALE_API_TOKEN environment variable.')
+
+ inventory_hostname = self.get_option('inventory_hostname')
+ if inventory_hostname not in ('name', 'uuid'):
+ raise AnsibleError('Invalid value for option inventory_hostname: %s'
+ % inventory_hostname)
+
+ ansible_host = self.get_option('ansible_host')
+ if ansible_host not in iface_type_map:
+ raise AnsibleError('Invalid value for option ansible_host: %s'
+ % ansible_host)
+
+ # Merge servers with the same name
+ firstpass = defaultdict(list)
+ for server in self._get_server_list():
+ firstpass[server['name']].append(server)
+
+ # Add servers to inventory
+ for name, servers in firstpass.items():
+ if len(servers) == 1 and inventory_hostname == 'name':
+ self.inventory.add_host(name)
+ servers[0]['inventory_hostname'] = name
+ else:
+ # Two servers with the same name exist, create a group
+ # with this name and add the servers by UUID
+ group_name = to_safe_group_name(name)
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+ for server in servers:
+ self.inventory.add_host(server['uuid'], group_name)
+ server['inventory_hostname'] = server['uuid']
+
+ # Set variables
+ iface_type, iface_version = iface_type_map[ansible_host]
+ for server in servers:
+ hostname = server.pop('inventory_hostname')
+ if ansible_host != 'none':
+ addresses = [address['address']
+ for interface in server['interfaces']
+ for address in interface['addresses']
+ if interface['type'] == iface_type
+ and address['version'] == iface_version]
+
+ if len(addresses) > 0:
+ self.inventory.set_variable(
+ hostname,
+ 'ansible_host',
+ addresses[0],
+ )
+ self.inventory.set_variable(
+ hostname,
+ 'cloudscale',
+ server,
+ )
+
+ variables = self.inventory.hosts[hostname].get_vars()
+ # Set composed variables
+ self._set_composite_vars(
+ self.get_option('compose'),
+ variables,
+ hostname,
+ self.get_option('strict'),
+ )
+
+ # Add host to composed groups
+ self._add_host_to_composed_groups(
+ self.get_option('groups'),
+ variables,
+ hostname,
+ self.get_option('strict'),
+ )
+
+ # Add host to keyed groups
+ self._add_host_to_keyed_groups(
+ self.get_option('keyed_groups'),
+ variables,
+ hostname,
+ self.get_option('strict'),
+ )
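
The parse() method above merges servers with identical names: a unique name is registered directly as the inventory hostname, while duplicated names fall back to UUID hostnames grouped under the (sanitized) server name. A self-contained sketch of that grouping rule, using made-up sample data:

    from collections import defaultdict

    def assign_hostnames(servers, inventory_hostname='name'):
        # Mirror of the two passes above: group servers by name first, then
        # register unique names directly and duplicates by their UUID.
        firstpass = defaultdict(list)
        for server in servers:
            firstpass[server['name']].append(server)

        hostnames = {}  # inventory hostname -> group name (None if ungrouped)
        for name, group in firstpass.items():
            if len(group) == 1 and inventory_hostname == 'name':
                hostnames[name] = None
            else:
                for server in group:
                    hostnames[server['uuid']] = name
        return hostnames

    print(assign_hostnames([
        {'name': 'web', 'uuid': '1111'},
        {'name': 'db', 'uuid': '2222'},
        {'name': 'db', 'uuid': '3333'},
    ]))
    # {'web': None, '2222': 'db', '3333': 'db'}
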
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py b/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py
new file mode 100644
index 000000000..f7c3e3850
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py
@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from datetime import datetime, timedelta
+from time import sleep
+from copy import deepcopy
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+def cloudscale_argument_spec():
+ return dict(
+ api_url=dict(
+ type='str',
+ fallback=(env_fallback, ['CLOUDSCALE_API_URL']),
+ default='https://api.cloudscale.ch/v1',
+ ),
+ api_token=dict(
+ type='str',
+ fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
+ no_log=True,
+ required=True,
+ ),
+ api_timeout=dict(
+ type='int',
+ fallback=(env_fallback, ['CLOUDSCALE_API_TIMEOUT']),
+ default=45,
+ ),
+ )
+
+
+class AnsibleCloudscaleApi(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ self._api_url = module.params['api_url']
+ if not self._api_url.endswith('/'):
+ self._api_url = self._api_url + '/'
+
+ self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
+
+ def _get(self, api_call):
+ resp, info = fetch_url(self._module, self._api_url + api_call,
+ headers=self._auth_header,
+ timeout=self._module.params['api_timeout'])
+
+ if info['status'] == 200:
+ return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
+ elif info['status'] == 404:
+ return None
+ else:
+ self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
+ '"%s".' % api_call, fetch_url_info=info)
+
+ def _post_or_patch(self, api_call, method, data, filter_none=True):
+ # This helps with tags when we have the full API resource href to update.
+ if self._api_url not in api_call:
+ api_endpoint = self._api_url + api_call
+ else:
+ api_endpoint = api_call
+
+ headers = self._auth_header.copy()
+ if data is not None:
+ # Sanitize data dictionary
+            # Deepcopy: iterate over a copy of the data dict, because
+            # mutating a dict while iterating over it is unsafe
+ for k, v in deepcopy(data).items():
+ if filter_none and v is None:
+ del data[k]
+
+ data = self._module.jsonify(data)
+ headers['Content-type'] = 'application/json'
+
+ resp, info = fetch_url(self._module,
+ api_endpoint,
+ headers=headers,
+ method=method,
+ data=data,
+ timeout=self._module.params['api_timeout'])
+
+ if info['status'] in (200, 201):
+ return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
+ elif info['status'] == 204:
+ return None
+ else:
+ self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for '
+ '"%s".' % (method, api_endpoint), fetch_url_info=info)
+
+ def _post(self, api_call, data=None):
+ return self._post_or_patch(api_call, 'POST', data)
+
+ def _patch(self, api_call, data=None, filter_none=True):
+ return self._post_or_patch(api_call, 'PATCH', data, filter_none)
+
+ def _delete(self, api_call):
+ # api_call might be full href already
+ if self._api_url not in api_call:
+ api_endpoint = self._api_url + api_call
+ else:
+ api_endpoint = api_call
+
+ resp, info = fetch_url(self._module,
+ api_endpoint,
+ headers=self._auth_header,
+ method='DELETE',
+ timeout=self._module.params['api_timeout'])
+
+ if info['status'] == 204:
+ return None
+ else:
+ self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
+ '"%s".' % api_endpoint, fetch_url_info=info)
+
+
+class AnsibleCloudscaleBase(AnsibleCloudscaleApi):
+
+ def __init__(
+ self,
+ module,
+ resource_name='',
+ resource_key_uuid='uuid',
+ resource_key_name='name',
+ resource_create_param_keys=None,
+ resource_update_param_keys=None,
+ ):
+ super(AnsibleCloudscaleBase, self).__init__(module)
+ self._result = {
+ 'changed': False,
+ 'diff': dict(
+ before=dict(),
+ after=dict()
+ ),
+ }
+ self._resource_data = dict()
+
+ # The identifier key of the resource, usually 'uuid'
+ self.resource_key_uuid = resource_key_uuid
+
+ # The name key of the resource, usually 'name'
+ self.resource_key_name = resource_key_name
+
+ # The API resource e.g server-group
+ self.resource_name = resource_name
+
+ # List of params used to create the resource
+ self.resource_create_param_keys = resource_create_param_keys or ['name']
+
+ # List of params used to update the resource
+ self.resource_update_param_keys = resource_update_param_keys or ['name']
+
+ # Resource has no name field but tags, we use a defined tag as name
+ self.use_tag_for_name = False
+ self.resource_name_tag = "ansible_name"
+
+ # Constraint Keys to match when query by name
+ self.query_constraint_keys = []
+
+ def pre_transform(self, resource):
+ return resource
+
+ def init_resource(self):
+ return {
+ 'state': "absent",
+ self.resource_key_uuid: self._module.params.get(self.resource_key_uuid) or self._resource_data.get(self.resource_key_uuid),
+ self.resource_key_name: self._module.params.get(self.resource_key_name) or self._resource_data.get(self.resource_key_name),
+ }
+
+ def query(self):
+ # Initialize
+ self._resource_data = self.init_resource()
+
+ # Query by UUID
+ uuid = self._module.params[self.resource_key_uuid]
+ if uuid is not None:
+
+ # network id case
+ if "/" in uuid:
+ uuid = uuid.split("/")[0]
+
+ resource = self._get('%s/%s' % (self.resource_name, uuid))
+ if resource:
+ self._resource_data = resource
+ self._resource_data['state'] = "present"
+
+ # Query by name
+ else:
+ name = self._module.params[self.resource_key_name]
+
+ # Resource has no name field, we use a defined tag as name
+ if self.use_tag_for_name:
+ resources = self._get('%s?tag:%s=%s' % (self.resource_name, self.resource_name_tag, name))
+ else:
+ resources = self._get('%s' % self.resource_name)
+
+ matching = []
+ for resource in resources:
+ if self.use_tag_for_name:
+ resource[self.resource_key_name] = resource['tags'].get(self.resource_name_tag)
+
+                # Skip the resource if a given constraint does not match, e.g. for floating_ip when the ip_version differs
+ for constraint_key in self.query_constraint_keys:
+ if self._module.params[constraint_key] is not None:
+ if constraint_key == 'zone':
+ resource_value = resource['zone']['slug']
+ else:
+ resource_value = resource[constraint_key]
+
+ if resource_value != self._module.params[constraint_key]:
+ break
+ else:
+ if resource[self.resource_key_name] == name:
+ matching.append(resource)
+
+ # Fail on more than one resource with identical name
+ if len(matching) > 1:
+ self._module.fail_json(
+ msg="More than one %s resource with '%s' exists: %s. "
+ "Use the '%s' parameter to identify the resource." % (
+ self.resource_name,
+ self.resource_key_name,
+ name,
+ self.resource_key_uuid
+ )
+ )
+ elif len(matching) == 1:
+ self._resource_data = matching[0]
+ self._resource_data['state'] = "present"
+
+ return self.pre_transform(self._resource_data)
+
+ def create(self, resource, data=None):
+ # Fail if UUID/ID was provided but the resource was not found on state=present.
+ uuid = self._module.params.get(self.resource_key_uuid)
+ if uuid is not None:
+ self._module.fail_json(msg="The resource with UUID '%s' was not found "
+ "and we would create a new one with different UUID, "
+                                       "this is probably not what you have asked for." % uuid)
+
+ self._result['changed'] = True
+
+ if not data:
+ data = dict()
+
+ for param in self.resource_create_param_keys:
+ data[param] = self._module.params.get(param)
+
+ self._result['diff']['before'] = deepcopy(resource)
+ self._result['diff']['after'] = deepcopy(resource)
+ self._result['diff']['after'].update(deepcopy(data))
+ self._result['diff']['after'].update({
+ 'state': "present",
+ })
+
+ if not self._module.check_mode:
+ resource = self._post(self.resource_name, data)
+ resource = self.pre_transform(resource)
+ resource['state'] = "present"
+ return resource
+
+ def wait_for_state(self, check_parameter, allowed_states):
+ start = datetime.now()
+ timeout = self._module.params['api_timeout'] * 2
+ while datetime.now() - start < timedelta(seconds=timeout):
+ info = self.query()
+ if info.get(check_parameter) in allowed_states:
+ return info
+ sleep(1)
+
+ # Timeout reached
+ name_uuid = info.get('name') or self._module.params.get('name') or \
+ self._module.params.get('uuid')
+
+ msg = "Timeout while waiting for a state change for resource %s to states %s" % (name_uuid, allowed_states)
+
+ self._module.fail_json(msg=msg)
+
+ def update(self, resource):
+ updated = False
+ for param in self.resource_update_param_keys:
+ updated = self._param_updated(param, resource) or updated
+
+ # Refresh if resource was updated in live mode
+ if updated and not self._module.check_mode:
+ resource = self.query()
+ return resource
+
+ def present(self):
+ resource = self.query()
+
+ if self.use_tag_for_name:
+ name_tag_value = self._module.params[self.resource_key_name] or resource.get('tags', dict()).get(self.resource_name_tag)
+ if name_tag_value:
+ self._module.params['tags'] = self._module.params['tags'] or dict()
+ self._module.params['tags'].update({
+ self.resource_name_tag: name_tag_value
+ })
+
+ if resource['state'] == "absent":
+ resource = self.create(resource)
+ else:
+ resource = self.update(resource)
+ return self.get_result(resource)
+
+ def absent(self):
+ resource = self.query()
+ if resource['state'] != "absent":
+ self._result['changed'] = True
+ self._result['diff']['before'] = deepcopy(resource)
+ self._result['diff']['after'] = self.init_resource()
+
+ if not self._module.check_mode:
+ href = resource.get('href')
+ if not href:
+                    self._module.fail_json(msg='Unable to delete %s, no href found.' % self.resource_name)
+
+ self._delete(href)
+ resource['state'] = "absent"
+ return self.get_result(resource)
+
+ def find_difference(self, key, resource, param):
+ is_different = False
+
+ # If it looks like a stub
+ if isinstance(resource[key], dict) and 'href' in resource[key]:
+ uuid = resource[key].get('href', '').split('/')[-1]
+ if param != uuid:
+ is_different = True
+
+ elif param != resource[key]:
+ is_different = True
+
+ return is_different
+
+ def _param_updated(self, key, resource):
+ param = self._module.params.get(key)
+ if param is None:
+ return False
+
+ if not resource or key not in resource:
+ return False
+
+ is_different = self.find_difference(key, resource, param)
+
+ if is_different:
+ self._result['changed'] = True
+
+ patch_data = {
+ key: param
+ }
+
+ self._result['diff']['before'].update({key: resource[key]})
+ self._result['diff']['after'].update(patch_data)
+
+ if not self._module.check_mode:
+ href = resource.get('href')
+ if not href:
+ self._module.fail_json(msg='Unable to update %s, no href found.' % key)
+
+ self._patch(href, patch_data)
+ return True
+ return False
+
+ def get_result(self, resource):
+ if resource:
+ for k, v in resource.items():
+ self._result[k] = v
+
+ # Transform the name tag to a name field
+ if self.use_tag_for_name:
+ self._result['name'] = self._result.get('tags', dict()).pop(self.resource_name_tag, None)
+
+ return self._result
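
update() and _param_updated() above only send the keys whose desired value differs from the live resource. A simplified, self-contained sketch of that diffing step (the real code additionally records a before/after diff, handles href stubs via find_difference() and issues the PATCH against the resource href):

    def build_patch(resource, params, update_keys):
        # Collect only the update keys that are set and differ from the live
        # resource; in check mode nothing would be sent.
        patch = {}
        for key in update_keys:
            desired = params.get(key)
            if desired is None or key not in resource:
                continue
            if desired != resource[key]:
                patch[key] = desired
        return patch

    live = {'name': 'web-1', 'tags': {'project': 'luna'}}
    wanted = {'name': 'web-1', 'tags': {'project': 'apollo'}}
    print(build_patch(live, wanted, ['name', 'tags']))  # {'tags': {'project': 'apollo'}}
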
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py
new file mode 100644
index 000000000..61e2f77f7
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Ciril Troxler <ciril.troxler@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: custom_image
+short_description: Manage custom images on the cloudscale.ch IaaS service
+description:
+ - Import, modify and delete custom images.
+notes:
+ - To import a new custom-image the I(url) and I(name) options are required.
+author:
+ - Ciril Troxler (@ctx)
+ - Gaudenz Steinlin (@gaudenz)
+version_added: 2.2.0
+options:
+ url:
+ description:
+ - The URL used to download the image.
+ type: str
+ force_retry:
+ description:
+ - Retry the image import even if a failed import using the same name and
+ URL already exists. This is necessary to recover from download errors.
+ default: false
+ type: bool
+ name:
+ description:
+ - The human readable name of the custom image. Either name or UUID must
+ be present to change an existing image.
+ type: str
+ uuid:
+ description:
+ - The unique identifier of the custom image import. Either name or UUID
+ must be present to change an existing image.
+ type: str
+ slug:
+ description:
+ - A string identifying the custom image for use within the API.
+ type: str
+ user_data_handling:
+ description:
+ - How user_data will be handled when creating a server. There are
+ currently two options, "pass-through" and "extend-cloud-config".
+ type: str
+ choices: [ pass-through, extend-cloud-config ]
+ zones:
+ description:
+ - Specify zones in which the custom image will be available (e.g. C(lpg1)
+ or C(rma1)).
+ type: list
+ elements: str
+ source_format:
+ description:
+ - The file format of the image referenced in the url. Currently only raw
+ is supported.
+ type: str
+ firmware_type:
+ description:
+ - The firmware type that will be used for servers created
+ with this image.
+ type: str
+ choices: [ bios, uefi ]
+ default: bios
+ tags:
+ description:
+ - The tags assigned to the custom image.
+ type: dict
+ state:
+    description: State of the custom image.
+ choices: [ present, absent ]
+ default: present
+ type: str
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = r'''
+- name: Import custom image
+ cloudscale_ch.cloud.custom_image:
+ name: "My Custom Image"
+ url: https://ubuntu.com/downloads/hirsute.img
+ slug: my-custom-image
+ user_data_handling: extend-cloud-config
+ zones: lpg1
+ tags:
+ project: luna
+ state: present
+ register: my_custom_image
+
+- name: Wait until import succeeded
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ my_custom_image.uuid }}"
+ retries: 15
+ delay: 5
+ register: image
+ until: image.import_status == 'success'
+ failed_when: image.import_status == 'failed'
+
+- name: Import custom image and wait until import succeeded
+ cloudscale_ch.cloud.custom_image:
+ name: "My Custom Image"
+ url: https://ubuntu.com/downloads/hirsute.img
+ slug: my-custom-image
+ user_data_handling: extend-cloud-config
+ zones: lpg1
+ tags:
+ project: luna
+ state: present
+ retries: 15
+ delay: 5
+ register: image
+ until: image.import_status == 'success'
+ failed_when: image.import_status == 'failed'
+
+- name: Import custom image with UEFI firmware type
+ cloudscale_ch.cloud.custom_image:
+ name: "My Custom UEFI Image"
+ url: https://ubuntu.com/downloads/hirsute.img
+ slug: my-custom-uefi-image
+ user_data_handling: extend-cloud-config
+ zones: lpg1
+ firmware_type: uefi
+ tags:
+ project: luna
+ state: present
+ register: my_custom_image
+
+- name: Update custom image
+ cloudscale_ch.cloud.custom_image:
+ name: "My Custom Image"
+ slug: my-custom-image
+ user_data_handling: extend-cloud-config
+ tags:
+ project: luna
+ state: present
+
+- name: Delete custom image
+ cloudscale_ch.cloud.custom_image:
+ uuid: '{{ my_custom_image.uuid }}'
+ state: absent
+
+- name: List all custom images
+ uri:
+ url: 'https://api.cloudscale.ch/v1/custom-images'
+ headers:
+ Authorization: 'Bearer {{ query("env", "CLOUDSCALE_API_TOKEN") }}'
+ status_code: 200
+ register: image_list
+- name: Search the image list for all images with name 'My Custom Image'
+ set_fact:
+ my_custom_images: '{{ image_list.json | selectattr("name","search", "My Custom Image" ) }}'
+'''
+
+RETURN = r'''
+href:
+ description: The API URL to get details about this resource.
+ returned: success when state == present
+ type: str
+  sample: https://api.cloudscale.ch/v1/custom-images/11111111-1864-4608-853a-0771b6885a3a
+uuid:
+ description: The unique identifier of the custom image.
+ returned: success
+ type: str
+ sample: 11111111-1864-4608-853a-0771b6885a3a
+name:
+ description: The human readable name of the custom image.
+ returned: success
+ type: str
+ sample: alan
+created_at:
+ description: The creation date and time of the resource.
+ returned: success
+ type: str
+ sample: "2020-05-29T13:18:42.511407Z"
+slug:
+ description: A string identifying the custom image for use within the API.
+ returned: success
+ type: str
+ sample: foo
+checksums:
+ description: The checksums of the custom image as key and value pairs. The
+ algorithm (e.g. sha256) name is in the key and the checksum in the value.
+ The set of algorithms used might change in the future.
+ returned: success
+ type: dict
+ sample: {
+ "md5": "5b3a1f21cde154cfb522b582f44f1a87",
+ "sha256": "5b03bcbd00b687e08791694e47d235a487c294e58ca3b1af704120123aa3f4e6"
+ }
+user_data_handling:
+ description: How user_data will be handled when creating a server. There are
+ currently two options, "pass-through" and "extend-cloud-config".
+ returned: success
+ type: str
+ sample: "pass-through"
+tags:
+  description: Tags associated with the custom image.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+import_status:
+ description: Shows the progress of an import. Values are one of
+ "started", "in_progress", "success" or "failed".
+ returned: success
+ type: str
+ sample: "in_progress"
+error_message:
+ description: Error message in case of a failed import.
+ returned: success
+ type: str
+ sample: "Expected HTTP 200, got HTTP 403"
+state:
+ description: The current status of the custom image.
+ returned: success
+ type: str
+ sample: present
+'''
+
+
+from ansible.module_utils.basic import (
+ AnsibleModule,
+)
+from ansible.module_utils.urls import (
+ fetch_url
+)
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+from ansible.module_utils._text import (
+ to_text
+)
+
+
+class AnsibleCloudscaleCustomImage(AnsibleCloudscaleBase):
+
+ def _transform_import_to_image(self, imp):
+ # Create a stub image from the import
+ img = imp.get('custom_image', {})
+ return {
+ 'href': img.get('href'),
+ 'uuid': imp['uuid'],
+ 'name': img.get('name'),
+ 'created_at': None,
+ 'size_gb': None,
+ 'checksums': None,
+ 'tags': imp['tags'],
+ 'url': imp['url'],
+ 'import_status': imp['status'],
+ 'error_message': imp.get('error_message', ''),
+ # Even failed image imports are reported as present. This then
+ # represents a failed import resource.
+ 'state': 'present',
+ # These fields are not present on the import, assume they are
+ # unchanged from the module parameters
+ 'user_data_handling': self._module.params['user_data_handling'],
+ 'zones': self._module.params['zones'],
+ 'slug': self._module.params['slug'],
+ 'firmware_type': self._module.params['firmware_type'],
+ }
+
+    # This method can be replaced by calling AnsibleCloudscaleBase._get from
+    # AnsibleCloudscaleCustomImage._get once the API bug is fixed.
+ def _get_url(self, url):
+
+ response, info = fetch_url(self._module,
+ url,
+ headers=self._auth_header,
+ method='GET',
+ timeout=self._module.params['api_timeout'])
+
+ if info['status'] == 200:
+ response = self._module.from_json(
+ to_text(response.read(),
+ errors='surrogate_or_strict'),
+ )
+ elif info['status'] == 404:
+ # Return None to be compatible with AnsibleCloudscaleBase._get
+ response = None
+ elif info['status'] == 500 and url.startswith(self._api_url + self.resource_name + '/import/'):
+ # Workaround a bug in the cloudscale.ch API which wrongly returns
+ # 500 instead of 404
+ response = None
+ else:
+ self._module.fail_json(
+ msg='Failure while calling the cloudscale.ch API with GET for '
+ '"%s"' % url,
+ fetch_url_info=info,
+ )
+
+ return response
+
+ def _get(self, api_call):
+
+ # Split api_call into components
+ api_url, call_uuid = api_call.split(self.resource_name)
+
+ # If the api_call does not contain the API URL
+ if not api_url:
+ api_url = self._api_url
+
+ # Fetch image(s) from the regular API endpoint
+ response = self._get_url(api_url + self.resource_name + call_uuid) or []
+
+ # Additionally fetch image(s) from the image import API endpoint
+ response_import = self._get_url(
+ api_url + self.resource_name + '/import' + call_uuid,
+ ) or []
+
+ # No image was found
+ if call_uuid and response == [] and response_import == []:
+ return None
+
+ # Convert single image responses (call with UUID) into a list
+ if call_uuid and response:
+ response = [response]
+ if call_uuid and response_import:
+ response_import = [response_import]
+
+ # Transform lists into UUID keyed dicts
+ response = dict([(i['uuid'], i) for i in response])
+ response_import = dict([(i['uuid'], i) for i in response_import])
+
+        # Filter the import list so that successful and in_progress imports
+ # shadow failed imports
+ response_import_filtered = dict([(k, v) for k, v
+ in response_import.items()
+ if v['status'] in ('success',
+ 'in_progress')])
+ # Only add failed imports if no import with the same name exists
+ # Only add the last failed import in the list (there is no timestamp on
+ # imports)
+ import_names = set([v['custom_image']['name'] for k, v
+ in response_import_filtered.items()])
+ for k, v in reversed(list(response_import.items())):
+ name = v['custom_image']['name']
+ if (v['status'] == 'failed' and name not in import_names):
+ import_names.add(name)
+ response_import_filtered[k] = v
+
+ # Merge import list into image list
+ for uuid, imp in response_import_filtered.items():
+ if uuid in response:
+                # Merge additional fields only present on the import
+ response[uuid].update(
+ url=imp['url'],
+ import_status=imp['status'],
+ error_message=imp.get('error_message', ''),
+ )
+ else:
+ response[uuid] = self._transform_import_to_image(imp)
+
+ if not call_uuid:
+ return response.values()
+ else:
+ return next(iter(response.values()))
+
+ def _post(self, api_call, data=None):
+        # Only new image imports are supported; the API does not support
+        # direct POST calls to image resources
+ if not api_call.endswith('custom-images'):
+ self._module.fail_json(msg="Error: Bad api_call URL.")
+ # Custom image imports use a different endpoint
+ api_call += '/import'
+
+ if self._module.params['url']:
+ return self._transform_import_to_image(
+ self._post_or_patch("%s" % api_call, 'POST', data),
+ )
+ else:
+ self._module.fail_json(msg="Cannot import a new image without url.")
+
+ def present(self):
+ resource = self.query()
+
+        # Fail if the existing image reports a firmware_type that differs
+        # from the requested one, because it cannot be changed afterwards.
+ if (resource.get('firmware_type') is not None
+ and resource.get('firmware_type') !=
+ self._module.params['firmware_type']):
+ # Custom error if the module tries to change the firmware_type.
+ msg = "Cannot change firmware type of an existing custom image"
+ self._module.fail_json(msg)
+
+ if resource['state'] == "absent":
+ resource = self.create(resource)
+ else:
+ # If this is a failed upload and the URL changed or the "force_retry"
+ # parameter is used, create a new image import.
+ if (resource.get('import_status') == 'failed'
+ and (resource['url'] != self._module.params['url']
+ or self._module.params['force_retry'])):
+ resource = self.create(resource)
+ else:
+ resource = self.update(resource)
+
+ return self.get_result(resource)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ slug=dict(type='str'),
+ url=dict(type='str'),
+ force_retry=dict(type='bool', default=False),
+ user_data_handling=dict(type='str',
+ choices=('pass-through',
+ 'extend-cloud-config')),
+ uuid=dict(type='str'),
+ firmware_type=dict(type='str',
+ choices=('bios',
+ 'uefi'),
+ default=('bios')),
+ tags=dict(type='dict'),
+ state=dict(type='str', default='present',
+ choices=('present', 'absent')),
+ zones=dict(type='list', elements='str'),
+ source_format=dict(type='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('name', 'uuid'),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_custom_image = AnsibleCloudscaleCustomImage(
+ module,
+ resource_name='custom-images',
+ resource_key_uuid='uuid',
+ resource_key_name='name',
+ resource_create_param_keys=[
+ 'name',
+ 'slug',
+ 'url',
+ 'user_data_handling',
+ 'firmware_type',
+ 'tags',
+ 'zones',
+ 'source_format',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'slug',
+ 'user_data_handling',
+ 'firmware_type',
+ 'tags',
+ ],
+ )
+
+ if module.params['state'] == "absent":
+ result = cloudscale_custom_image.absent()
+ else:
+ result = cloudscale_custom_image.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
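
The _get() override above merges the regular custom-images list with the import list, letting successful or in-progress imports shadow failed ones; a failed import is only kept when no other import exists for the same image name. A self-contained sketch of that filtering rule with made-up data:

    def filter_imports(imports):
        # Keep success/in_progress imports unconditionally; keep a failed
        # import only if its image name has no non-failed import.
        kept = {uuid: imp for uuid, imp in imports.items()
                if imp['status'] in ('success', 'in_progress')}
        names = {imp['custom_image']['name'] for imp in kept.values()}
        for uuid, imp in reversed(list(imports.items())):
            name = imp['custom_image']['name']
            if imp['status'] == 'failed' and name not in names:
                names.add(name)
                kept[uuid] = imp
        return kept

    imports = {
        'a': {'status': 'failed', 'custom_image': {'name': 'img1'}},
        'b': {'status': 'success', 'custom_image': {'name': 'img1'}},
        'c': {'status': 'failed', 'custom_image': {'name': 'img2'}},
    }
    print(sorted(filter_imports(imports)))  # ['b', 'c']
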
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py
new file mode 100644
index 000000000..7f578d18c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: floating_ip
+short_description: Manages floating IPs on the cloudscale.ch IaaS service
+description:
+ - Create, assign and delete floating IPs on the cloudscale.ch IaaS service.
+notes:
+ - Once a floating_ip is created, all parameters except C(server), C(reverse_ptr) and C(tags) are read-only.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Denis Krienbühl (@href)
+ - René Moser (@resmo)
+version_added: 1.0.0
+options:
+ network:
+ description:
+ - Floating IP address to change.
+ - One of I(network) or I(name) is required to identify the floating IP.
+ aliases: [ ip ]
+ type: str
+ name:
+ description:
+      - Name to identify the floating IP address for idempotency.
+ - One of I(network) or I(name) is required to identify the floating IP.
+ - Required for assigning a new floating IP.
+ version_added: 1.3.0
+ type: str
+ state:
+ description:
+ - State of the floating IP.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ ip_version:
+ description:
+ - IP protocol version of the floating IP.
+ - Required when assigning a new floating IP.
+ choices: [ 4, 6 ]
+ type: int
+ server:
+ description:
+ - UUID of the server assigned to this floating IP.
+ type: str
+ type:
+ description:
+ - The type of the floating IP.
+ choices: [ regional, global ]
+ type: str
+ default: regional
+ region:
+ description:
+ - Region in which the floating IP resides (e.g. C(lpg) or C(rma)).
+ If omitted, the region of the project default zone is used.
+ This parameter must be omitted if I(type) is set to C(global).
+ type: str
+ prefix_length:
+ description:
+ - Only valid if I(ip_version) is 6.
+ - Prefix length for the IPv6 network. Currently only a prefix of /56 can be requested. If no I(prefix_length) is present, a
+ single address is created.
+ choices: [ 56 ]
+ type: int
+ reverse_ptr:
+ description:
+ - Reverse PTR entry for this address.
+ - You cannot set a reverse PTR entry for IPv6 floating networks. Reverse PTR entries are only allowed for single addresses.
+ type: str
+ tags:
+ description:
+ - Tags associated with the floating IP. Set this to C({}) to clear any tags.
+ type: dict
+ version_added: 1.1.0
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Request a new floating IP without assignment to a server
+- name: Request a floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: IP to my server
+ ip_version: 4
+ reverse_ptr: my-server.example.com
+ api_token: xxxxxx
+
+# Request a new floating IP with assignment
+- name: Request a floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: web
+ ip_version: 4
+ server: 47cec963-fcd2-482f-bdb6-24461b2d47b1
+ reverse_ptr: my-server.example.com
+ api_token: xxxxxx
+
+# Assign an existing floating IP to a different server by its IP address
+- name: Move floating IP to backup server
+ cloudscale_ch.cloud.floating_ip:
+ ip: 192.0.2.123
+ server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ api_token: xxxxxx
+
+# Assign an existing floating IP to a different server by name
+- name: Move floating IP to backup server
+ cloudscale_ch.cloud.floating_ip:
+ name: IP to my server
+ server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ api_token: xxxxxx
+
+# Request a new floating IPv6 network
+- name: Request a floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: IPv6 to my server
+ ip_version: 6
+ prefix_length: 56
+ server: 47cec963-fcd2-482f-bdb6-24461b2d47b1
+ api_token: xxxxxx
+ region: lpg1
+
+# Assign an existing floating network to a different server
+- name: Move floating IP to backup server
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ floating_ip.ip }}'
+ server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ api_token: xxxxxx
+
+# Remove a floating IP
+- name: Release floating IP
+ cloudscale_ch.cloud.floating_ip:
+ ip: 192.0.2.123
+ state: absent
+ api_token: xxxxxx
+
+# Remove a floating IP by name
+- name: Release floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: IP to my server
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+name:
+ description: The name of the floating IP.
+ returned: success
+ type: str
+ sample: my floating ip
+ version_added: 1.3.0
+href:
+ description: The API URL to get details about this floating IP.
+ returned: success when state == present
+ type: str
+ sample: https://api.cloudscale.ch/v1/floating-ips/2001:db8::cafe
+network:
+ description: The CIDR notation of the network that is routed to your server.
+ returned: success
+ type: str
+ sample: 2001:db8::cafe/128
+next_hop:
+ description: Your floating IP is routed to this IP address.
+ returned: success when state == present
+ type: str
+ sample: 2001:db8:dead:beef::42
+reverse_ptr:
+ description: The reverse pointer for this floating IP address.
+ returned: success when state == present
+ type: str
+ sample: 185-98-122-176.cust.cloudscale.ch
+server:
+ description: The floating IP is routed to this server.
+ returned: success when state == present
+ type: str
+ sample: 47cec963-fcd2-482f-bdb6-24461b2d47b1
+ip:
+ description: The floating IP address.
+ returned: success when state == present
+ type: str
+ sample: 185.98.122.176
+region:
+ description: The region of the floating IP.
+ returned: success when state == present
+ type: dict
+ sample: {'slug': 'lpg'}
+state:
+ description: The current status of the floating IP.
+ returned: success
+ type: str
+ sample: present
+tags:
+  description: Tags associated with the floating IP.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+ version_added: 1.1.0
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+
+class AnsibleCloudscaleFloatingIp(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleFloatingIp, self).__init__(
+ module=module,
+ resource_key_uuid='network',
+ resource_name='floating-ips',
+ resource_create_param_keys=[
+ 'ip_version',
+ 'server',
+ 'prefix_length',
+ 'reverse_ptr',
+ 'type',
+ 'region',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'server',
+ 'reverse_ptr',
+ 'tags',
+ ],
+ )
+ self.use_tag_for_name = True
+ self.query_constraint_keys = ['ip_version']
+
+ def pre_transform(self, resource):
+ if 'server' in resource and isinstance(resource['server'], dict):
+ resource['server'] = resource['server']['uuid']
+ return resource
+
+ def create(self, resource):
+ # Fail when missing params for creation
+ self._module.fail_on_missing_params(['ip_version', 'name'])
+ return super(AnsibleCloudscaleFloatingIp, self).create(resource)
+
+ def get_result(self, resource):
+ network = resource.get('network')
+ if network:
+ self._result['ip'] = network.split('/')[0]
+ return super(AnsibleCloudscaleFloatingIp, self).get_result(resource)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ state=dict(default='present', choices=('present', 'absent'), type='str'),
+ network=dict(aliases=('ip',), type='str'),
+ ip_version=dict(choices=(4, 6), type='int'),
+ server=dict(type='str'),
+ type=dict(type='str', choices=('regional', 'global'), default='regional'),
+ region=dict(type='str'),
+ prefix_length=dict(choices=(56,), type='int'),
+ reverse_ptr=dict(type='str'),
+ tags=dict(type='dict'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('network', 'name'),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_floating_ip = AnsibleCloudscaleFloatingIp(module)
+
+ if module.params['state'] == 'absent':
+ result = cloudscale_floating_ip.absent()
+ else:
+ result = cloudscale_floating_ip.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
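
get_result() above derives the ip return value from the network field, which the API reports in CIDR notation. A trivial sketch of that derivation:

    def ip_from_network(network):
        # The API returns the floating IP as a CIDR network; the module
        # exposes the bare address part as 'ip'.
        return network.split('/')[0] if network else None

    print(ip_from_network('185.98.122.176/32'))   # 185.98.122.176
    print(ip_from_network('2001:db8::cafe/128'))  # 2001:db8::cafe
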
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer.py
new file mode 100644
index 000000000..4e5682979
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2023, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2023, Kenneth Joss <kenneth.joss@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: load_balancer
+short_description: Manages load balancers on the cloudscale.ch IaaS service
+description:
+ - Get, create, update, delete load balancers on the cloudscale.ch IaaS service.
+notes:
+  - If I(uuid) option is provided, it takes precedence over I(name) for load balancer selection. This allows updating the load balancer's name.
+ - If no I(uuid) option is provided, I(name) is used for load balancer selection. If more than one load balancer with this name exists, execution is aborted.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Kenneth Joss (@k-304)
+version_added: "2.3.0"
+options:
+ state:
+ description:
+ - State of the load balancer.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ name:
+ description:
+ - Name of the load balancer.
+ - Either I(name) or I(uuid) are required.
+ type: str
+ uuid:
+ description:
+ - UUID of the load balancer.
+ - Either I(name) or I(uuid) are required.
+ type: str
+ flavor:
+ description:
+ - Flavor of the load balancer.
+ default: lb-standard
+ type: str
+ vip_addresses:
+ description:
+ - See the [API documentation](https://www.cloudscale.ch/en/api/v1#vip_addresses-attribute-specification) for details about this parameter.
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - Create a VIP address on the subnet identified by this UUID.
+ type: str
+ address:
+ description:
+ - Use this address.
+ - Must be in the same range as subnet.
+          - If empty, a random address will be used.
+ type: str
+ zone:
+ description:
+ - Zone in which the load balancer resides (e.g. C(lpg1) or C(rma1)).
+ type: str
+ tags:
+ description:
+      - Tags associated with the load balancer. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create and start a load balancer
+- name: Start cloudscale.ch load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: my-shiny-cloudscale-load-balancer
+ flavor: lb-standard
+ zone: rma1
+ tags:
+ project: my project
+ api_token: xxxxxx
+
+# Create and start a load balancer with specific subnet
+- name: Start cloudscale.ch load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: my-shiny-cloudscale-load-balancer
+ flavor: lb-standard
+ vip_addresses:
+ - subnet: d7b82c9b-5900-436c-9296-e94dca01c7a0
+ address: 172.25.12.1
+ zone: lpg1
+ tags:
+ project: my project
+ api_token: xxxxxx
+
+# Get load balancer facts by name
+- name: Get facts of a load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: my-shiny-cloudscale-load-balancer
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this load balancer
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/0f62e0a7-f459-4fc4-9c25-9e57b6cb4b2f
+uuid:
+ description: The unique identifier for this load balancer
+ returned: success
+ type: str
+ sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+ description: The display name of the load balancer
+ returned: success
+ type: str
+ sample: web-lb
+created_at:
+ description: The creation date and time of the load balancer
+ returned: success when not state == absent
+ type: str
+ sample: "2023-02-07T15:32:02.308041Z"
+status:
+ description: The current operational status of the load balancer
+ returned: success
+ type: str
+ sample: running
+state:
+ description: The current state of the load balancer
+ returned: success
+ type: str
+ sample: present
+zone:
+ description: The zone used for this load balancer
+ returned: success when not state == absent
+ type: dict
+ sample: { 'slug': 'lpg1' }
+flavor:
+ description: The flavor that has been used for this load balancer
+ returned: success when not state == absent
+  type: dict
+ sample: { "slug": "lb-standard", "name": "LB-Standard" }
+vip_addresses:
+ description: List of vip_addresses for this load balancer
+ returned: success when not state == absent
+  type: list
+  sample: [ {"version": "4", "address": "192.0.2.110",
+             "subnet": {
+               "href": "https://api.cloudscale.ch/v1/subnets/92c70b2f-99cb-4811-8823-3d46572006e4",
+               "uuid": "92c70b2f-99cb-4811-8823-3d46572006e4",
+               "cidr": "192.0.2.0/24"
+             }} ]
+tags:
+  description: Tags associated with the load balancer
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('present',
+ 'absent',
+ )
+
+
+class AnsibleCloudscaleLoadBalancer(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleLoadBalancer, self).__init__(
+ module,
+ resource_name='load-balancers',
+ resource_create_param_keys=[
+ 'name',
+ 'flavor',
+ 'zone',
+ 'vip_addresses',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'tags',
+ ],
+ )
+
+ def create(self, resource, data=None):
+ super().create(resource)
+ if not self._module.check_mode:
+ resource = self.wait_for_state('status', ('running', ))
+ return resource
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ flavor=dict(type='str', default='lb-standard'),
+ zone=dict(type='str'),
+ vip_addresses=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ subnet=dict(type='str'),
+ address=dict(type='str'),
+ ),
+ ),
+ tags=dict(type='dict'),
+ state=dict(type='str', default='present', choices=ALLOWED_STATES),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(),
+ required_one_of=(('name', 'uuid'),),
+ required_if=(('state', 'present', ('name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_load_balancer = AnsibleCloudscaleLoadBalancer(module)
+ cloudscale_load_balancer.query_constraint_keys = [
+ 'zone',
+ ]
+
+ if module.params['state'] == "absent":
+ result = cloudscale_load_balancer.absent()
+ else:
+ result = cloudscale_load_balancer.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
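
The vip_addresses option above takes a list of dicts, each with a subnet UUID and an optional fixed address (the API picks a random address from the subnet when address is omitted). A small sketch of building such a payload; the helper name is invented for illustration:

    def build_vip_addresses(subnet_uuid, address=None):
        # One VIP address entry per subnet; 'address' is optional and must
        # lie inside the subnet's range when given.
        vip = {'subnet': subnet_uuid}
        if address:
            vip['address'] = address
        return [vip]

    print(build_vip_addresses('d7b82c9b-5900-436c-9296-e94dca01c7a0', '172.25.12.1'))
    # [{'subnet': 'd7b82c9b-5900-436c-9296-e94dca01c7a0', 'address': '172.25.12.1'}]
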
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_health_monitor.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_health_monitor.py
new file mode 100644
index 000000000..cf99e0cd3
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_health_monitor.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2023, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2023, Kenneth Joss <kenneth.joss@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: load_balancer_health_monitor
+short_description: Manages load balancer health monitors on the cloudscale.ch IaaS service
+description:
+ - Get, create, update, delete health monitors on the cloudscale.ch IaaS service.
+notes:
+  - Health monitors do not have names. I(uuid)s are used to reference health monitors.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Kenneth Joss (@k-304)
+version_added: "2.3.0"
+options:
+ state:
+ description:
+ - State of the load balancer health monitor.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ uuid:
+ description:
+ - UUID of the load balancer health monitor.
+ type: str
+ pool:
+ description:
+ - The pool of the health monitor.
+ type: str
+ delay_s:
+ description:
+ - The delay between two successive checks in seconds.
+ type: int
+ timeout_s:
+ description:
+ - The maximum time allowed for an individual check in seconds.
+ type: int
+ up_threshold:
+ description:
+ - The number of checks that need to be successful before the monitor_status of a pool member changes to "up".
+ type: int
+ down_threshold:
+ description:
+ - The number of checks that need to fail before the monitor_status of a pool member changes to "down".
+ type: int
+ type:
+ description:
+ - The type of the health monitor.
+ - See the [API documentation](https://www.cloudscale.ch/en/api/v1#create-a-health-monitor) for allowed options.
+ type: str
+ http:
+ description:
+ - Advanced options for health monitors with type "http" or "https".
+ type: dict
+ suboptions:
+ expected_codes:
+ description:
+ - The HTTP status codes allowed for a check to be considered successful.
+ - See the [API documentation](https://www.cloudscale.ch/en/api/v1#http-attribute-specification) for details.
+ type: list
+ elements: str
+ method:
+ description:
+ - The HTTP method used for the check.
+ type: str
+ url_path:
+ description:
+ - The URL used for the check.
+ type: str
+ version:
+ description:
+ - The HTTP version used for the check.
+ type: str
+ host:
+ description:
+ - The server name in the HTTP Host header used for the check.
+ - Requires version to be set to "1.1".
+ type: str
+ tags:
+ description:
+      - Tags associated with the health monitor. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create a simple health monitor for a pool
+- name: Create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: 'swimming-pool'
+ load_balancer: '3d41b118-f95c-4897-ad74-2260fea783fc'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ api_token: xxxxxx
+ register: load_balancer_pool
+
+- name: Create a load balancer health monitor (ping)
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ type: 'ping'
+ api_token: xxxxxx
+ register: load_balancer_health_monitor
+
+# Get load balancer health monitor facts by UUID
+- name: Get facts of a load balancer health monitor by UUID
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ api_token: xxxxxx
+
+# Update a health monitor
+- name: Update HTTP method of a load balancer health monitor from GET to CONNECT
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor_http.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'CONNECT'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+ register: load_balancer_health_monitor
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this load balancer health monitor
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/health-monitors/ee4952d4-2eba-4dec-8957-7911b3ce245b
+uuid:
+ description: The unique identifier for this load balancer health monitor
+ returned: success
+ type: str
+ sample: ee4952d4-2eba-4dec-8957-7911b3ce245b
+created_at:
+ description: The creation date and time of the load balancer health monitor
+ returned: success when not state == absent
+ type: str
+ sample: "2023-02-22T09:55:38.285018Z"
+pool:
+ description: The pool of the health monitor
+ returned: success when not state == absent
+ type: dict
+  sample: {
+    "href": "https://api.cloudscale.ch/v1/load-balancers/pools/618a6cc8-d757-4fab-aa10-d49dc47e667b",
+    "uuid": "618a6cc8-d757-4fab-aa10-d49dc47e667b",
+    "name": "swimming pool"
+  }
+delay_s:
+ description: The delay between two successive checks in seconds
+ returned: success when not state == absent
+ type: int
+ sample: 2
+timeout_s:
+ description: The maximum time allowed for an individual check in seconds
+ returned: success when not state == absent
+ type: int
+ sample: 1
+up_threshold:
+ description: The number of checks that need to be successful before the monitor_status of a pool member changes to "up"
+ returned: success when not state == absent
+ type: int
+ sample: 2
+down_threshold:
+ description: The number of checks that need to fail before the monitor_status of a pool member changes to "down"
+ returned: success when not state == absent
+ type: int
+ sample: 3
+type:
+ description: The type of the health monitor
+ returned: success when not state == absent
+ type: str
+http:
+ description: Advanced options for health monitors with type "http" or "https"
+ returned: success when not state == absent
+ type: dict
+  sample: {
+    "expected_codes": [
+      "200"
+    ],
+    "method": "GET",
+    "url_path": "/",
+    "version": "1.0",
+    "host": null
+  }
+tags:
+  description: Tags associated with the health monitor
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('present',
+ 'absent',
+ )
+ALLOWED_HTTP_POST_PARAMS = ('expected_codes',
+ 'host',
+ 'method',
+ 'url_path')
+
+
+class AnsibleCloudscaleLoadBalancerHealthMonitor(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleLoadBalancerHealthMonitor, self).__init__(
+ module,
+ resource_name='load-balancers/health-monitors',
+ resource_key_name='pool',
+ resource_create_param_keys=[
+ 'pool',
+ 'timeout_s',
+ 'up_threshold',
+ 'down_threshold',
+ 'type',
+ 'http',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'delay_s',
+ 'timeout_s',
+ 'up_threshold',
+ 'down_threshold',
+ 'expected_codes',
+ 'http',
+ 'tags',
+ ],
+ )
+
+ def query(self):
+ # Initialize
+ self._resource_data = self.init_resource()
+
+ resource_key_pool = 'pool'
+ uuid = self._module.params[self.resource_key_uuid]
+ pool = self._module.params[resource_key_pool]
+ matching = []
+
+        # Either search by the given health monitor UUID or
+        # search the health monitor by its associated pool UUID (1:1)
+ if uuid is not None:
+ super().query()
+ else:
+ pool = self._module.params[resource_key_pool]
+ if pool is not None:
+
+ resources = self._get('%s' % (self.resource_name))
+
+ if resources:
+ for health_monitor in resources:
+ if health_monitor[resource_key_pool]['uuid'] == pool:
+ matching.append(health_monitor)
+
+ # Fail on more than one resource with identical name
+ if len(matching) > 1:
+ self._module.fail_json(
+ msg="More than one %s resource for pool '%s' exists." % (
+ self.resource_name,
+                            pool
+ )
+ )
+ elif len(matching) == 1:
+ self._resource_data = matching[0]
+ self._resource_data['state'] = "present"
+
+ return self.pre_transform(self._resource_data)
+
+ def update(self, resource):
+ updated = False
+ for param in self.resource_update_param_keys:
+ if param == 'http' and self._module.params.get('http') is not None:
+ for subparam in ALLOWED_HTTP_POST_PARAMS:
+ updated = self._http_param_updated(subparam, resource) or updated
+ else:
+ updated = self._param_updated(param, resource) or updated
+
+ # Refresh if resource was updated in live mode
+ if updated and not self._module.check_mode:
+ resource = self.query()
+ return resource
+
+ def _http_param_updated(self, key, resource):
+ param_http = self._module.params.get('http')
+ param = param_http[key]
+
+ if param is None:
+ return False
+
+ if not resource or key not in resource['http']:
+ return False
+
+ is_different = self.find_http_difference(key, resource, param)
+
+ if is_different:
+ self._result['changed'] = True
+
+ patch_data = {
+ 'http': {
+ key: param
+ }
+ }
+
+ before_data = {
+ 'http': {
+ key: resource['http'][key]
+ }
+ }
+
+ self._result['diff']['before'].update(before_data)
+ self._result['diff']['after'].update(patch_data)
+
+ if not self._module.check_mode:
+ href = resource.get('href')
+ if not href:
+ self._module.fail_json(msg='Unable to update %s, no href found.' % key)
+
+ self._patch(href, patch_data)
+ return True
+ return False
+
+ def find_http_difference(self, key, resource, param):
+ is_different = False
+
+ if param != resource['http'][key]:
+ is_different = True
+
+ return is_different
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ uuid=dict(type='str'),
+ pool=dict(type='str'),
+ delay_s=dict(type='int'),
+ timeout_s=dict(type='int'),
+ up_threshold=dict(type='int'),
+ down_threshold=dict(type='int'),
+ type=dict(type='str'),
+ http=dict(
+ type='dict',
+ options=dict(
+ expected_codes=dict(type='list', elements='str'),
+ method=dict(type='str'),
+ url_path=dict(type='str'),
+ version=dict(type='str'),
+ host=dict(type='str'),
+ )
+ ),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=ALLOWED_STATES),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(),
+ required_one_of=(),
+ required_if=(),
+ supports_check_mode=True,
+ )
+
+ cloudscale_load_balancer_health_monitor = AnsibleCloudscaleLoadBalancerHealthMonitor(module)
+ cloudscale_load_balancer_health_monitor.query_constraint_keys = []
+
+ if module.params['state'] == "absent":
+ result = cloudscale_load_balancer_health_monitor.absent()
+ else:
+ result = cloudscale_load_balancer_health_monitor.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
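
_http_param_updated() above diffs the nested http sub-options individually and issues one PATCH per changed sub-key. A simplified, self-contained sketch that collects all changed sub-keys into a single payload instead:

    ALLOWED_HTTP_KEYS = ('expected_codes', 'host', 'method', 'url_path')

    def build_http_patch(live_http, wanted_http):
        # Only allowed sub-keys that are set and differ from the live health
        # monitor end up in the nested PATCH body.
        changed = {}
        for key in ALLOWED_HTTP_KEYS:
            desired = wanted_http.get(key)
            if desired is not None and desired != live_http.get(key):
                changed[key] = desired
        return {'http': changed} if changed else {}

    live = {'expected_codes': ['200'], 'method': 'GET', 'url_path': '/', 'host': None}
    wanted = {'expected_codes': ['200', '202'], 'method': 'CONNECT'}
    print(build_http_patch(live, wanted))
    # {'http': {'expected_codes': ['200', '202'], 'method': 'CONNECT'}}
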
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_listener.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_listener.py
new file mode 100644
index 000000000..be91ac0b5
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_listener.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2023, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2023, Kenneth Joss <kenneth.joss@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: load_balancer_listener
+short_description: Manages load balancer listeners on the cloudscale.ch IaaS service
+description:
+ - Get, create, update, delete listeners on the cloudscale.ch IaaS service.
+notes:
+ - If I(uuid) option is provided, it takes precedence over I(name) for load balancer listener selection. This allows updating the listener's name.
+ - If no I(uuid) option is provided, I(name) is used for load balancer listener selection.
+ - If more than one load balancer listener with this name exists, execution is aborted.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Kenneth Joss (@k-304)
+version_added: "2.3.0"
+options:
+ state:
+ description:
+ - State of the load balancer listener.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ name:
+ description:
+ - Name of the load balancer listener.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ uuid:
+ description:
+ - UUID of the load balancer listener.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ pool:
+ description:
+ - The pool of the listener.
+ type: str
+ protocol:
+ description:
+ - The protocol used for receiving traffic.
+ type: str
+ protocol_port:
+ description:
+ - The port on which traffic is received.
+ type: int
+ allowed_cidrs:
+ description:
+ - Restrict the allowed source IPs for this listener.
+ - Empty means that any source IP is allowed. If the list is non-empty, traffic from source IPs not included is denied.
+ type: list
+ elements: str
+ timeout_client_data_ms:
+ description:
+ - Client inactivity timeout in milliseconds.
+ type: int
+ timeout_member_connect_ms:
+ description:
+ - Pool member connection timeout in milliseconds.
+ type: int
+ timeout_member_data_ms:
+ description:
+ - Pool member inactivity timeout in milliseconds.
+ type: int
+ tags:
+ description:
+ - Tags associated with the load balancer listener. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create a load balancer listener for a pool using registered variables
+- name: Create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: 'swimming-pool'
+ load_balancer: '3d41b118-f95c-4897-ad74-2260fea783fc'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ api_token: xxxxxx
+ register: load_balancer_pool
+
+- name: Create a load balancer listener
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: 'swimming-pool-listener'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: 'tcp'
+ protocol_port: 8080
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+
+# Create a load balancer listener for a pool with restriction
+- name: Create a load balancer listener with ip restriction
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: 'new-listener2'
+ pool: '618a6cc8-d757-4fab-aa10-d49dc47e667b'
+ protocol: 'tcp'
+ protocol_port: 8080
+ allowed_cidrs:
+ - '192.168.3.0/24'
+ - '2001:db8:85a3:8d3::/64'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+
+# Get load balancer listener facts by name
+- name: Get facts of a load balancer listener by name
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this load balancer listener
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/listeners/9fa91f17-fdb4-431f-8a59-78473f64e661
+uuid:
+ description: The unique identifier for this load balancer listener
+ returned: success
+ type: str
+ sample: 9fa91f17-fdb4-431f-8a59-78473f64e661
+name:
+ description: The display name of the load balancer listener
+ returned: success
+ type: str
+ sample: new-listener
+created_at:
+ description: The creation date and time of the load balancer listener
+ returned: success when not state == absent
+ type: str
+ sample: "2023-02-07T15:32:02.308041Z"
+pool:
+ description: The pool of the load balancer listener
+ returned: success when not state == absent
+ type: complex
+ contains:
+ href:
+ description: API URL to get details about the pool.
+ returned: success
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/pools/618a6cc8-d757-4fab-aa10-d49dc47e667b
+ uuid:
+ description: The unique identifier for the pool.
+ returned: success
+ type: str
+ sample: 618a6cc8-d757-4fab-aa10-d49dc47e667b
+ name:
+ description: The name of the pool.
+ returned: success
+ type: str
+ sample: new-listener
+protocol:
+ description: The protocol used for receiving traffic
+ returned: success when not state == absent
+ type: str
+ sample: tcp
+protocol_port:
+ description: The port on which traffic is received
+ returned: success when not state == absent
+ type: int
+ sample: 8080
+allowed_cidrs:
+ description: Restrict the allowed source IPs for this listener
+ returned: success when not state == absent
+ type: list
+ sample: ["192.168.3.0/24", "2001:db8:85a3:8d3::/64"]
+timeout_client_data_ms:
+ description: Client inactivity timeout in milliseconds
+ returned: success when not state == absent
+ type: int
+ sample: 50000
+timeout_member_connect_ms:
+ description: Pool member connection timeout in milliseconds
+ returned: success when not state == absent
+ type: int
+ sample: 50000
+timeout_member_data_ms:
+ description: Pool member inactivity timeout in milliseconds
+ returned: success when not state == absent
+ type: int
+ sample: 50000
+tags:
+ description: Tags associated with the load balancer listener
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('present',
+ 'absent',
+ )
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ pool=dict(type='str'),
+ protocol=dict(type='str'),
+ protocol_port=dict(type='int'),
+ allowed_cidrs=dict(type='list', elements='str'),
+ timeout_client_data_ms=dict(type='int'),
+ timeout_member_connect_ms=dict(type='int'),
+ timeout_member_data_ms=dict(type='int'),
+ tags=dict(type='dict'),
+ state=dict(type='str', default='present', choices=ALLOWED_STATES),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(),
+ required_one_of=(('name', 'uuid'),),
+ required_if=(('state', 'present', ('name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_load_balancer_listener = AnsibleCloudscaleBase(
+ module,
+ resource_name='load-balancers/listeners',
+ resource_create_param_keys=[
+ 'name',
+ 'pool',
+ 'protocol',
+ 'protocol_port',
+ 'allowed_cidrs',
+ 'timeout_client_data_ms',
+ 'timeout_member_connect_ms',
+ 'timeout_member_data_ms',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'allowed_cidrs',
+ 'timeout_client_data_ms',
+ 'timeout_member_connect_ms',
+ 'timeout_member_data_ms',
+ 'tags',
+ ],
+ )
+
+ if module.params['state'] == "absent":
+ result = cloudscale_load_balancer_listener.absent()
+ else:
+ result = cloudscale_load_balancer_listener.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool.py
new file mode 100644
index 000000000..6b794a3e3
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2023, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2023, Kenneth Joss <kenneth.joss@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: load_balancer_pool
+short_description: Manages load balancer pools on the cloudscale.ch IaaS service
+description:
+ - Get, create, update, delete pools on the cloudscale.ch IaaS service.
+notes:
+ - If I(uuid) option is provided, it takes precedence over I(name) for pool selection. This allows updating the load balancer pool's name.
+ - If no I(uuid) option is provided, I(name) is used for pool selection. If more than one pool with this name exists, execution is aborted.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Kenneth Joss (@k-304)
+version_added: "2.3.0"
+options:
+ state:
+ description:
+ - State of the load balancer pool.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ name:
+ description:
+ - Name of the load balancer pool.
+ type: str
+ uuid:
+ description:
+ - UUID of the load balancer pool.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ load_balancer:
+ description:
+ - UUID of the load balancer for this pool.
+ type: str
+ algorithm:
+ description:
+ - The algorithm according to which the incoming traffic is distributed between the pool members.
+ - See the API documentation at U(https://www.cloudscale.ch/en/api/v1#pool-algorithms) for supported distribution algorithms.
+ type: str
+ protocol:
+ description:
+ - The protocol used for traffic between the load balancer and the pool members.
+ - See the API documentation at U(https://www.cloudscale.ch/en/api/v1#pool-protocols) for supported protocols.
+ type: str
+ tags:
+ description:
+ - Tags associated with the load balancer pool. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create a pool for a load balancer using registered variables
+- name: Create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: 'lb1'
+ flavor: 'lb-standard'
+ zone: 'lpg1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+ register: load_balancer
+
+- name: Create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: 'swimming-pool'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+ register: load_balancer_pool
+
+# Create a load balancer pool with algorithm: round_robin and protocol: tcp
+- name: Create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: 'cloudscale-loadbalancer-pool1'
+ load_balancer: '3766c579-3012-4a85-8192-2bbb4ef85b5f'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+
+# Get load balancer pool facts by name
+- name: Get facts of a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: cloudscale-loadbalancer-pool1
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this load balancer
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/pools/
+uuid:
+ description: The unique identifier for this load balancer pool
+ returned: success
+ type: str
+ sample: 3766c579-3012-4a85-8192-2bbb4ef85b5f
+name:
+ description: The display name of the load balancer pool
+ returned: success
+ type: str
+ sample: web-lb-pool1
+created_at:
+ description: The creation date and time of the load balancer pool
+ returned: success when not state == absent
+ type: str
+ sample: "2023-02-07T15:32:02.308041Z"
+load_balancer:
+ description: The load balancer this pool is connected to
+ returned: success when not state == absent
+ type: dict
+ sample: {
+ "href": "https://api.cloudscale.ch/v1/load-balancers/15264769-ac69-4809-a8e4-4d73f8f92496",
+ "uuid": "15264769-ac69-4809-a8e4-4d73f8f92496",
+ "name": "web-lb"
+ }
+algorithm:
+ description: The algorithm according to which the incoming traffic is distributed between the pool members
+ returned: success
+ type: str
+ sample: round_robin
+protocol:
+ description: The protocol used for traffic between the load balancer and the pool members
+ returned: success
+ type: str
+ sample: tcp
+state:
+ description: The current state of the load balancer pool
+ returned: success
+ type: str
+ sample: present
+tags:
+ description: Tags associated with the load balancer pool
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('present',
+ 'absent',
+ )
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ uuid=dict(),
+ load_balancer=dict(),
+ algorithm=dict(type='str'),
+ protocol=dict(type='str'),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=ALLOWED_STATES),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(),
+ required_one_of=(('name', 'uuid'),),
+ required_if=(('state', 'present', ('name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_load_balancer_pool = AnsibleCloudscaleBase(
+ module,
+ resource_name='load-balancers/pools',
+ resource_create_param_keys=[
+ 'name',
+ 'load_balancer',
+ 'algorithm',
+ 'protocol',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'tags',
+ ],
+ )
+ cloudscale_load_balancer_pool.query_constraint_keys = []
+
+ if module.params['state'] == "absent":
+ result = cloudscale_load_balancer_pool.absent()
+ else:
+ result = cloudscale_load_balancer_pool.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool_member.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool_member.py
new file mode 100644
index 000000000..49a2124d9
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/load_balancer_pool_member.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2023, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2023, Kenneth Joss <kenneth.joss@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: load_balancer_pool_member
+short_description: Manages load balancer pool members on the cloudscale.ch IaaS service
+description:
+ - Get, create, update, delete pool members on the cloudscale.ch IaaS service.
+notes:
+ - If I(uuid) option is provided, it takes precedence over I(name) for pool member selection. This allows updating the member's name.
+ - If no I(uuid) option is provided, I(name) is used for pool member selection. If more than one pool member with this name exists, execution is aborted.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - Kenneth Joss (@k-304)
+version_added: "2.3.0"
+options:
+ state:
+ description:
+ - State of the load balancer pool member.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ name:
+ description:
+ - Name of the load balancer pool member.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ uuid:
+ description:
+ - UUID of the load balancer pool member.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ load_balancer_pool:
+ description:
+ - UUID of the load balancer pool.
+ type: str
+ enabled:
+ description:
+ - Pool member will not receive traffic if false. Default is true.
+ default: true
+ type: bool
+ protocol_port:
+ description:
+ - The port to which actual traffic is sent.
+ type: int
+ monitor_port:
+ description:
+ - The port to which health monitor checks are sent.
+ - If not specified, I(protocol_port) will be used. Default is null.
+ default: null
+ type: int
+ address:
+ description:
+ - The IP address to which traffic is sent.
+ type: str
+ subnet:
+ description:
+ - The UUID of the subnet in which I(address) is located.
+ type: str
+ tags:
+ description:
+ - Tags associated with the load balancer pool member. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create a pool member for a load balancer pool using registered variables
+- name: Create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: 'swimming-pool'
+ load_balancer: '514064c2-cfd4-4b0c-8a4b-c68c552ff84f'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+ register: load_balancer_pool
+
+- name: Create a load balancer pool member
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: 'my-shiny-swimming-pool-member'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: true
+ protocol_port: 8080
+ monitor_port: 8081
+ subnet: '70d282ab-2a01-4abb-ada5-34e56a5a7eee'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ api_token: xxxxxx
+
+# Get load balancer pool member facts by name
+- name: Get facts of a load balancer pool member by name
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: 'my-shiny-swimming-pool-member'
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this load balancer
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/load-balancers/pools/20a7eb11-3e17-4177-b46d-36e13b101d1c/members/b9991773-857d-47f6-b20b-0a03709529a9
+uuid:
+ description: The unique identifier for this load balancer pool member
+ returned: success
+ type: str
+ sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+ description: The display name of the load balancer pool member
+ returned: success
+ type: str
+ sample: web-lb-pool
+enabled:
+ description: The status of the load balancer pool member
+ returned: success
+ type: bool
+ sample: true
+created_at:
+ description: The creation date and time of the load balancer pool member
+ returned: success when not state == absent
+ type: str
+ sample: "2023-02-07T15:32:02.308041Z"
+pool:
+ description: The pool of the pool member
+ returned: success
+ type: dict
+ sample: {
+ "href": "https://api.cloudscale.ch/v1/load-balancers/pools/20a7eb11-3e17-4177-b46d-36e13b101d1c",
+ "uuid": "20a7eb11-3e17-4177-b46d-36e13b101d1c",
+ "name": "web-lb-pool"
+ }
+protocol_port:
+ description: The port to which actual traffic is sent
+ returned: success
+ type: int
+ sample: 8080
+monitor_port:
+ description: The port to which health monitor checks are sent
+ returned: success
+ type: int
+ sample: 8081
+address:
+ description: The IP address to which traffic is sent
+ returned: success
+ type: str
+ sample: 10.11.12.3
+subnet:
+ description: The subnet in a private network in which the address is located
+ returned: success
+ type: dict
+ sample: {
+ "href": "https://api.cloudscale.ch/v1/subnets/70d282ab-2a01-4abb-ada5-34e56a5a7eee",
+ "uuid": "70d282ab-2a01-4abb-ada5-34e56a5a7eee",
+ "cidr": "10.11.12.0/24"
+ }
+monitor_status:
+ description: The status of the pool's health monitor check for this member
+ returned: success
+ type: str
+ sample: up
+tags:
+ description: Tags associated with the load balancer pool member
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('present',
+ 'absent',
+ )
+
+
+class AnsibleCloudscaleLoadBalancerPoolMember(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleLoadBalancerPoolMember, self).__init__(
+ module,
+ resource_name='load-balancers/pools/%s/members' % module.params['load_balancer_pool'],
+ resource_create_param_keys=[
+ 'name',
+ 'enabled',
+ 'protocol_port',
+ 'monitor_port',
+ 'address',
+ 'subnet',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'enabled',
+ 'tags',
+ ],
+ )
+
+ def query(self):
+ # Initialize
+ self._resource_data = self.init_resource()
+
+ # Query by UUID
+ uuid = self._module.params[self.resource_key_uuid]
+ if uuid is not None:
+
+ # If an ID of the form "<uuid>/..." is given, use only the UUID part
+ if "/" in uuid:
+ uuid = uuid.split("/")[0]
+
+ resource = self._get('%s/%s' % (self.resource_name, uuid))
+ if resource:
+ self._resource_data = resource
+ self._resource_data['state'] = "present"
+
+ # Query by name
+ else:
+ name = self._module.params[self.resource_key_name]
+
+ # Resource has no name field; use a defined tag as the name
+ if self.use_tag_for_name:
+ resources = self._get('%s?tag:%s=%s' % (self.resource_name, self.resource_name_tag, name))
+ else:
+ resources = self._get('%s' % self.resource_name)
+
+ matching = []
+ if resources is None:
+ self._module.fail_json(
+ msg="The load balancer pool %s does not exist."
+ % (self.resource_name,)
+ )
+ for resource in resources:
+ if self.use_tag_for_name:
+ resource[self.resource_key_name] = resource['tags'].get(self.resource_name_tag)
+
+ # Skip the resource if a constraint does not match, e.g. for floating_ip the ip_version may differ
+ for constraint_key in self.query_constraint_keys:
+ if self._module.params[constraint_key] is not None:
+ if constraint_key == 'zone':
+ resource_value = resource['zone']['slug']
+ else:
+ resource_value = resource[constraint_key]
+
+ if resource_value != self._module.params[constraint_key]:
+ break
+ else:
+ if resource[self.resource_key_name] == name:
+ matching.append(resource)
+
+ # Fail on more than one resource with identical name
+ if len(matching) > 1:
+ self._module.fail_json(
+ msg="More than one %s resource with '%s' exists: %s. "
+ "Use the '%s' parameter to identify the resource." % (
+ self.resource_name,
+ self.resource_key_name,
+ name,
+ self.resource_key_uuid
+ )
+ )
+ elif len(matching) == 1:
+ self._resource_data = matching[0]
+ self._resource_data['state'] = "present"
+
+ return self.pre_transform(self._resource_data)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ uuid=dict(),
+ load_balancer_pool=dict(type='str'),
+ enabled=dict(type='bool', default=True),
+ protocol_port=dict(type='int'),
+ monitor_port=dict(type='int'),
+ subnet=dict(type='str'),
+ address=dict(type='str'),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=ALLOWED_STATES),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(),
+ required_one_of=(('name', 'uuid'),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_load_balancer_pool_member = AnsibleCloudscaleLoadBalancerPoolMember(module)
+ cloudscale_load_balancer_pool_member.query_constraint_keys = []
+
+ if module.params['state'] == "absent":
+ result = cloudscale_load_balancer_pool_member.absent()
+ else:
+ result = cloudscale_load_balancer_pool_member.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
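The query() override above lists all members of the configured pool and narrows the candidates with the optional query_constraint_keys (empty for this module, but shared with zone-aware modules such as network and server_group) before matching on name, using a for/else: the name comparison in the else branch only runs when no constraint caused a break. A condensed sketch of that pattern follows; the data is illustrative and the zone field is flattened, whereas the real code compares resource['zone']['slug'].

# Sketch: skip a resource as soon as one constraint parameter differs,
# otherwise fall through to the name comparison (for/else).
def matches(resource, params, constraint_keys):
    for key in constraint_keys:
        wanted = params.get(key)
        if wanted is not None and resource.get(key) != wanted:
            break  # constraint violated, skip this resource
    else:
        return resource.get('name') == params.get('name')
    return False

if __name__ == '__main__':
    params = {'name': 'web-1', 'zone': 'lpg1'}
    resources = [
        {'name': 'web-1', 'zone': 'rma1'},
        {'name': 'web-1', 'zone': 'lpg1'},
    ]
    print([r for r in resources if matches(r, params, ('zone',))])
    # -> [{'name': 'web-1', 'zone': 'lpg1'}]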
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py
new file mode 100644
index 000000000..7b1da5b2e
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: network
+short_description: Manages networks on the cloudscale.ch IaaS service
+description:
+ - Create, update and remove networks.
+author:
+ - René Moser (@resmo)
+version_added: "1.2.0"
+options:
+ name:
+ description:
+ - Name of the network.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ uuid:
+ description:
+ - UUID of the network.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ mtu:
+ description:
+ - The MTU of the network.
+ default: 9000
+ type: int
+ auto_create_ipv4_subnet:
+ description:
+ - Whether to automatically create an IPv4 subnet in the network or not.
+ default: true
+ type: bool
+ zone:
+ description:
+ - Zone slug of the network (e.g. C(lpg1) or C(rma1)).
+ type: str
+ state:
+ description:
+ - State of the network.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ tags:
+ description:
+ - Tags associated with the network. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+---
+- name: Ensure network exists
+ cloudscale_ch.cloud.network:
+ name: my network
+ api_token: xxxxxx
+
+- name: Ensure network in a specific zone
+ cloudscale_ch.cloud.network:
+ name: my network
+ zone: lpg1
+ api_token: xxxxxx
+
+- name: Ensure a network is absent
+ cloudscale_ch.cloud.network:
+ name: my network
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+---
+href:
+ description: API URL to get details about this network.
+ returned: success
+ type: str
+ sample: https://api.cloudscale.ch/v1/networks/cfde831a-4e87-4a75-960f-89b0148aa2cc
+uuid:
+ description: The unique identifier for the network.
+ returned: success
+ type: str
+ sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+ description: The name of the network.
+ returned: success
+ type: str
+ sample: my network
+created_at:
+ description: The creation date and time of the network.
+ returned: success
+ type: str
+ sample: "2019-05-29T13:18:42.511407Z"
+subnets:
+ description: A list of subnets objects of the network.
+ returned: success
+ type: complex
+ contains:
+ href:
+ description: API URL to get details about the subnet.
+ returned: success
+ type: str
+ sample: https://api.cloudscale.ch/v1/subnets/33333333-1864-4608-853a-0771b6885a3
+ uuid:
+ description: The unique identifier for the subnet.
+ returned: success
+ type: str
+ sample: 33333333-1864-4608-853a-0771b6885a3
+ cidr:
+ description: The CIDR of the subnet.
+ returned: success
+ type: str
+ sample: 172.16.0.0/24
+mtu:
+ description: The MTU of the network.
+ returned: success
+ type: int
+ sample: 9000
+zone:
+ description: The zone of the network.
+ returned: success
+ type: dict
+ sample: { 'slug': 'rma1' }
+state:
+ description: State of the network.
+ returned: success
+ type: str
+ sample: present
+tags:
+ description: Tags associated with the network.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ mtu=dict(type='int', default=9000),
+ auto_create_ipv4_subnet=dict(type='bool', default=True),
+ zone=dict(type='str'),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('name', 'uuid'),),
+ required_if=(('state', 'present', ('name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_network = AnsibleCloudscaleBase(
+ module,
+ resource_name='networks',
+ resource_create_param_keys=[
+ 'name',
+ 'mtu',
+ 'auto_create_ipv4_subnet',
+ 'zone',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'mtu',
+ 'tags',
+ ],
+ )
+
+ cloudscale_network.query_constraint_keys = [
+ 'zone',
+ ]
+
+ if module.params['state'] == 'absent':
+ result = cloudscale_network.absent()
+ else:
+ result = cloudscale_network.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py
new file mode 100644
index 000000000..d4d117817
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: objects_user
+short_description: Manages objects users on the cloudscale.ch IaaS service
+description:
+ - Create, update and remove objects users on the cloudscale.ch IaaS service.
+author:
+ - Rene Moser (@resmo)
+version_added: 1.1.0
+options:
+ display_name:
+ description:
+ - Display name of the objects user.
+ - Either I(display_name) or I(id) is required.
+ type: str
+ aliases:
+ - name
+ id:
+ description:
+ - ID of the objects user.
+ - Either I(display_name) or I(id) is required.
+ type: str
+ tags:
+ description:
+ - Tags associated with the objects user. Set this to C({}) to clear any tags.
+ type: dict
+ state:
+ description:
+ - State of the objects user.
+ default: present
+ choices: [ present, absent ]
+ type: str
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = r'''
+- name: Create an objects user
+ cloudscale_ch.cloud.objects_user:
+ display_name: alan
+ tags:
+ project: luna
+ api_token: xxxxxx
+ register: object_user
+
+- name: print keys
+ debug:
+ var: object_user.keys
+
+- name: Update an objects user
+ cloudscale_ch.cloud.objects_user:
+ display_name: alan
+ tags:
+ project: gemini
+ api_token: xxxxxx
+
+- name: Remove an objects user
+ cloudscale_ch.cloud.objects_user:
+ display_name: alan
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = r'''
+href:
+ description: The API URL to get details about this resource.
+ returned: success when state == present
+ type: str
+ sample: https://api.cloudscale.ch/v1/objects-users/6fe39134bf4178747eebc429f82cfafdd08891d4279d0d899bc4012db1db6a15
+display_name:
+ description: The display name of the objects user.
+ returned: success
+ type: str
+ sample: alan
+id:
+ description: The ID of the objects user.
+ returned: success
+ type: str
+ sample: 6fe39134bf4178747eebc429f82cfafdd08891d4279d0d899bc4012db1db6a15
+keys:
+ description: List of key objects.
+ returned: success
+ type: complex
+ contains:
+ access_key:
+ description: The access key.
+ returned: success
+ type: str
+ sample: 0ZTAIBKSGYBRHQ09G11W
+ secret_key:
+ description: The secret key.
+ returned: success
+ type: str
+ sample: bn2ufcwbIa0ARLc5CLRSlVaCfFxPHOpHmjKiH34T
+tags:
+ description: Tags associated with the objects user.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+state:
+ description: The current status of the objects user.
+ returned: success
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ..module_utils.api import AnsibleCloudscaleBase, cloudscale_argument_spec
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ display_name=dict(type='str', aliases=['name']),
+ id=dict(type='str'),
+ tags=dict(type='dict'),
+ state=dict(type='str', default='present', choices=('present', 'absent')),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('display_name', 'id'),),
+ required_if=(('state', 'present', ('display_name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_objects_user = AnsibleCloudscaleBase(
+ module,
+ resource_name='objects-users',
+ resource_key_uuid='id',
+ resource_key_name='display_name',
+ resource_create_param_keys=[
+ 'display_name',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'display_name',
+ 'tags',
+ ],
+ )
+
+ if module.params['state'] == "absent":
+ result = cloudscale_objects_user.absent()
+ else:
+ result = cloudscale_objects_user.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
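objects_user reuses the generic base class but remaps the identifying keys, because the API object exposes id and display_name instead of uuid and name. The base-class internals are not part of this diff; the sketch below only illustrates, under that assumption, how a lookup with configurable identifier keys can work. The helper and its data are made up for illustration.

# Sketch: generic lookup with configurable identifier keys (hypothetical helper).
def find(resources, params, key_uuid='uuid', key_name='name'):
    """Return the resource matching the configured identifier keys, or None."""
    ident = params.get(key_uuid)
    if ident is not None:
        return next((r for r in resources if r.get(key_uuid) == ident), None)
    return next((r for r in resources if r.get(key_name) == params.get(key_name)), None)

if __name__ == '__main__':
    users = [{'id': 'abc123', 'display_name': 'alan'}]
    print(find(users, {'display_name': 'alan'},
               key_uuid='id', key_name='display_name'))
    # -> {'id': 'abc123', 'display_name': 'alan'}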
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py
new file mode 100644
index 000000000..d912750c1
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py
@@ -0,0 +1,737 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright: (c) 2019, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: server
+short_description: Manages servers on the cloudscale.ch IaaS service
+description:
+ - Create, update, start, stop and delete servers on the cloudscale.ch IaaS service.
+notes:
+ - If I(uuid) option is provided, it takes precedence over I(name) for server selection. This allows updating the server's name.
+ - If no I(uuid) option is provided, I(name) is used for server selection. If more than one server with this name exists, execution is aborted.
+ - Only the I(name), I(flavor), I(interfaces) and I(tags) options are evaluated for updates of an existing server.
+ - The option I(force=true) must be given to allow a running server to be stopped and restarted so the changes can be applied.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - René Moser (@resmo)
+ - Denis Krienbühl (@href)
+version_added: "1.0.0"
+options:
+ state:
+ description:
+ - State of the server.
+ choices: [ running, stopped, absent ]
+ default: running
+ type: str
+ name:
+ description:
+ - Name of the Server.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ uuid:
+ description:
+ - UUID of the server.
+ - Either I(name) or I(uuid) is required.
+ type: str
+ flavor:
+ description:
+ - Flavor of the server.
+ type: str
+ image:
+ description:
+ - Image used to create the server.
+ type: str
+ zone:
+ description:
+ - Zone in which the server resides (e.g. C(lpg1) or C(rma1)).
+ type: str
+ volume_size_gb:
+ description:
+ - Size of the root volume in GB.
+ default: 10
+ type: int
+ bulk_volume_size_gb:
+ description:
+ - Size of the bulk storage volume in GB.
+ - No bulk storage volume if not set.
+ type: int
+ ssh_keys:
+ description:
+ - List of SSH public keys.
+ - Use the full content of your .pub file here.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for the server.
+ type: str
+ use_public_network:
+ description:
+ - Attach a public network interface to the server.
+ type: bool
+ use_private_network:
+ description:
+ - Attach a private network interface to the server.
+ type: bool
+ use_ipv6:
+ description:
+ - Enable IPv6 on the public network interface.
+ default: true
+ type: bool
+ interfaces:
+ description:
+ - List of network interface objects specifying the interfaces to be attached to the server.
+ See U(https://www.cloudscale.ch/en/api/v1/#interfaces-attribute-specification) for more details.
+ type: list
+ elements: dict
+ version_added: 1.4.0
+ suboptions:
+ network:
+ description:
+ - Create a network interface on the network identified by UUID.
+ Use 'public' instead of a UUID to attach a public network interface.
+ Can be omitted if a subnet is provided under addresses.
+ type: str
+ addresses:
+ description:
+ - Attach a private network interface and configure a subnet and/or an IP address.
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - UUID of the subnet from which an address will be assigned.
+ type: str
+ address:
+ description:
+ - The static IP address of the interface. Use '[]' to avoid assigning an IP address via DHCP.
+ type: str
+ server_groups:
+ description:
+ - List of UUID or names of server groups.
+ type: list
+ elements: str
+ user_data:
+ description:
+ - Cloud-init configuration (cloud-config) data to use for the server.
+ type: str
+ force:
+ description:
+ - Allow to stop the running server for updating if necessary.
+ default: false
+ type: bool
+ tags:
+ description:
+ - Tags associated with the server. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create and start a server with an existing server group (shiny-group)
+- name: Start cloudscale.ch server
+ cloudscale_ch.cloud.server:
+ name: my-shiny-cloudscale-server
+ image: debian-10
+ flavor: flex-4-4
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ server_groups: shiny-group
+ zone: lpg1
+ use_private_network: true
+ bulk_volume_size_gb: 100
+ api_token: xxxxxx
+
+# Start another server in anti-affinity (server group shiny-group)
+- name: Start second cloudscale.ch server
+ cloudscale_ch.cloud.server:
+ name: my-other-shiny-server
+ image: ubuntu-16.04
+ flavor: flex-8-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ server_groups: shiny-group
+ zone: lpg1
+ api_token: xxxxxx
+
+# Force to update the flavor of a running server
+- name: Start cloudscale.ch server
+ cloudscale_ch.cloud.server:
+ name: my-shiny-cloudscale-server
+ image: debian-10
+ flavor: flex-8-2
+ force: true
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ use_private_network: true
+ bulk_volume_size_gb: 100
+ api_token: xxxxxx
+ register: server1
+
+# Stop the first server
+- name: Stop my first server
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server1.uuid }}'
+ state: stopped
+ api_token: xxxxxx
+
+# Delete my second server
+- name: Delete my second server
+ cloudscale_ch.cloud.server:
+ name: my-other-shiny-server
+ state: absent
+ api_token: xxxxxx
+
+# Start a server and wait for the SSH host keys to be generated
+- name: Start server and wait for SSH host keys
+ cloudscale_ch.cloud.server:
+ name: my-cloudscale-server-with-ssh-key
+ image: debian-10
+ flavor: flex-4-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ api_token: xxxxxx
+ register: server
+ until: server is not failed
+ retries: 5
+ delay: 2
+
+# Start a server with two network interfaces:
+#
+# A public interface with IPv4/IPv6
+# A private interface on a specific private network with an IPv4 address
+
+- name: Start a server with a public and private network interface
+ cloudscale_ch.cloud.server:
+ name: my-cloudscale-server-with-two-network-interfaces
+ image: debian-10
+ flavor: flex-4-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ api_token: xxxxxx
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: UUID_of_private_subnet
+
+# Start a server with a specific IPv4 address from subnet range
+- name: Start a server with a specific IPv4 address from subnet range
+ cloudscale_ch.cloud.server:
+ name: my-cloudscale-server-with-specific-address
+ image: debian-10
+ flavor: flex-4-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ api_token: xxxxxx
+ interfaces:
+ - addresses:
+ - subnet: UUID_of_private_subnet
+ address: 'A.B.C.D'
+
+# Start a server with two network interfaces:
+#
+# A public interface with IPv4/IPv6
+# A private interface on a specific private network with no IPv4 address
+
+- name: Start a server with a private network interface and no IP address
+ cloudscale_ch.cloud.server:
+ name: my-cloudscale-server-with-specific-address
+ image: debian-10
+ flavor: flex-4-2
+ ssh_keys:
+ - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
+ api_token: xxxxxx
+ interfaces:
+ - network: 'public'
+ - network: UUID_of_private_network
+ addresses: []
+'''
+
+RETURN = '''
+href:
+ description: API URL to get details about this server
+ returned: success when not state == absent
+ type: str
+ sample: https://api.cloudscale.ch/v1/servers/cfde831a-4e87-4a75-960f-89b0148aa2cc
+uuid:
+ description: The unique identifier for this server
+ returned: success
+ type: str
+ sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+ description: The display name of the server
+ returned: success
+ type: str
+ sample: its-a-me-mario.cloudscale.ch
+state:
+ description: The current status of the server
+ returned: success
+ type: str
+ sample: running
+flavor:
+ description: The flavor that has been used for this server
+ returned: success when not state == absent
+ type: dict
+ sample: { "slug": "flex-4-2", "name": "Flex-4-2", "vcpu_count": 2, "memory_gb": 4 }
+image:
+ description: The image used for booting this server
+ returned: success when not state == absent
+ type: dict
+ sample: { "default_username": "ubuntu", "name": "Ubuntu 18.04 LTS", "operating_system": "Ubuntu", "slug": "ubuntu-18.04" }
+zone:
+ description: The zone used for booting this server
+ returned: success when not state == absent
+ type: dict
+ sample: { 'slug': 'lpg1' }
+volumes:
+ description: List of volumes attached to the server
+ returned: success when not state == absent
+ type: list
+ sample: [ {"type": "ssd", "device": "/dev/vda", "size_gb": "50"} ]
+interfaces:
+ description: List of network ports attached to the server
+ returned: success when not state == absent
+ type: list
+ sample: [ { "type": "public", "addresses": [ ... ] } ]
+ssh_fingerprints:
+ description: A list of SSH host key fingerprints. Will be null until the host keys could be retrieved from the server.
+ returned: success when not state == absent
+ type: list
+ sample: ["ecdsa-sha2-nistp256 SHA256:XXXX", ... ]
+ssh_host_keys:
+ description: A list of SSH host keys. Will be null until the host keys could be retrieved from the server.
+ returned: success when not state == absent
+ type: list
+ sample: ["ecdsa-sha2-nistp256 XXXXX", ... ]
+server_groups:
+ description: List of server groups
+ returned: success when not state == absent
+ type: list
+ sample: [ {"href": "https://api.cloudscale.ch/v1/server-groups/...", "uuid": "...", "name": "db-group"} ]
+tags:
+ description: Tags associated with the server.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from datetime import datetime, timedelta
+from time import sleep
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+ALLOWED_STATES = ('running',
+ 'stopped',
+ 'absent',
+ )
+
+
+class AnsibleCloudscaleServer(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleServer, self).__init__(module)
+
+ # Initialize server dictionary
+ self._info = {}
+
+ def _init_server_container(self):
+ return {
+ 'uuid': self._module.params.get('uuid') or self._info.get('uuid'),
+ 'name': self._module.params.get('name') or self._info.get('name'),
+ 'state': 'absent',
+ }
+
+ def _get_server_info(self, refresh=False):
+ if self._info and not refresh:
+ return self._info
+
+ self._info = self._init_server_container()
+
+ uuid = self._info.get('uuid')
+ if uuid is not None:
+ server_info = self._get('servers/%s' % uuid)
+ if server_info:
+ self._info = self._transform_state(server_info)
+
+ else:
+ name = self._info.get('name')
+ if name is not None:
+ servers = self._get('servers') or []
+ matching_server = []
+ for server in servers:
+ if server['name'] == name:
+ matching_server.append(server)
+
+ if len(matching_server) == 1:
+ self._info = self._transform_state(matching_server[0])
+ elif len(matching_server) > 1:
+ self._module.fail_json(msg="More than one server with name '%s' exists. "
+ "Use the 'uuid' parameter to identify the server." % name)
+
+ return self._info
+
+ @staticmethod
+ def _transform_state(server):
+ if 'status' in server:
+ server['state'] = server['status']
+ del server['status']
+ else:
+ server['state'] = 'absent'
+ return server
+
+ def _wait_for_state(self, states):
+ start = datetime.now()
+ timeout = self._module.params['api_timeout'] * 2
+ while datetime.now() - start < timedelta(seconds=timeout):
+ server_info = self._get_server_info(refresh=True)
+ if server_info.get('state') in states:
+ return server_info
+ sleep(1)
+
+ # Timeout reached
+ if server_info.get('name') is not None:
+ msg = "Timeout while waiting for a state change on server %s to states %s. " \
+ "Current state is %s." % (server_info.get('name'), states, server_info.get('state'))
+ else:
+ name_uuid = self._module.params.get('name') or self._module.params.get('uuid')
+ msg = 'Timeout while waiting to find the server %s' % name_uuid
+
+ self._module.fail_json(msg=msg)
+
+ def _start_stop_server(self, server_info, target_state="running", ignore_diff=False):
+ actions = {
+ 'stopped': 'stop',
+ 'running': 'start',
+ }
+
+ server_state = server_info.get('state')
+ if server_state != target_state:
+ self._result['changed'] = True
+
+ if not ignore_diff:
+ self._result['diff']['before'].update({
+ 'state': server_info.get('state'),
+ })
+ self._result['diff']['after'].update({
+ 'state': target_state,
+ })
+ if not self._module.check_mode:
+ self._post('servers/%s/%s' % (server_info['uuid'], actions[target_state]))
+ server_info = self._wait_for_state((target_state, ))
+
+ return server_info
+
+ def _update_param(self, param_key, server_info, requires_stop=False):
+ param_value = self._module.params.get(param_key)
+ if param_value is None:
+ return server_info
+
+ if 'slug' in server_info[param_key]:
+ server_v = server_info[param_key]['slug']
+ else:
+ server_v = server_info[param_key]
+
+ if server_v != param_value:
+ # Set the diff output
+ self._result['diff']['before'].update({param_key: server_v})
+ self._result['diff']['after'].update({param_key: param_value})
+
+ if server_info.get('state') == "running":
+ if requires_stop and not self._module.params.get('force'):
+ self._module.warn("Some changes won't be applied to running servers. "
+ "Use force=true to allow the server '%s' to be stopped/started." % server_info['name'])
+ return server_info
+
+ # Either the server is stopped or change is forced
+ self._result['changed'] = True
+ if not self._module.check_mode:
+
+ if requires_stop:
+ self._start_stop_server(server_info, target_state="stopped", ignore_diff=True)
+
+ patch_data = {
+ param_key: param_value,
+ }
+
+ # Response is 204: No Content
+ self._patch('servers/%s' % server_info['uuid'], patch_data)
+
+ # State changes to "changing" after update, waiting for stopped/running
+ server_info = self._wait_for_state(('stopped', 'running'))
+
+ return server_info
+
+ def _get_server_group_ids(self):
+ server_group_params = self._module.params['server_groups']
+ if not server_group_params:
+ return None
+
+ matching_group_names = []
+ results = []
+ server_groups = self._get('server-groups')
+ for server_group in server_groups:
+ if server_group['uuid'] in server_group_params:
+ results.append(server_group['uuid'])
+ server_group_params.remove(server_group['uuid'])
+
+ elif server_group['name'] in server_group_params:
+ results.append(server_group['uuid'])
+ server_group_params.remove(server_group['name'])
+ # Remember the names found
+ matching_group_names.append(server_group['name'])
+
+ # Names are not unique; check whether the name was already matched in a previous iteration
+ elif server_group['name'] in matching_group_names:
+ self._module.fail_json(msg="More than one server group with name '%s' exists. "
+ "Use the 'uuid' parameter to identify the server group." % server_group['name'])
+
+ if server_group_params:
+ self._module.fail_json(msg="Server group name or UUID not found: %s" % ', '.join(server_group_params))
+
+ return results
+
+ def _create_server(self, server_info):
+ self._result['changed'] = True
+ self.normalize_interfaces_param()
+
+ data = deepcopy(self._module.params)
+ for i in ('uuid', 'state', 'force', 'api_timeout', 'api_token', 'api_url'):
+ del data[i]
+ data['server_groups'] = self._get_server_group_ids()
+
+ self._result['diff']['before'] = self._init_server_container()
+ self._result['diff']['after'] = deepcopy(data)
+ if not self._module.check_mode:
+ self._post('servers', data)
+ server_info = self._wait_for_state(('running', ))
+ return server_info
+
+ def _update_server(self, server_info):
+
+ previous_state = server_info.get('state')
+
+ # The API doesn't support to update server groups.
+ # Show a warning to the user if the desired state does not match.
+ desired_server_group_ids = self._get_server_group_ids()
+ if desired_server_group_ids is not None:
+ current_server_group_ids = [grp['uuid'] for grp in server_info['server_groups']]
+ if desired_server_group_ids != current_server_group_ids:
+ self._module.warn("Server groups cannot be changed. The server needs to be redeployed to change its groups.")
+
+ # Remove interface properties that were not filled out by the user
+ self.normalize_interfaces_param()
+
+ # Compare the interfaces as specified by the user, with the interfaces
+ # as received by the API. The structures are somewhat different, so
+ # they need to be evaluated in detail
+ wanted = self._module.params.get('interfaces')
+ actual = server_info.get('interfaces')
+
+ try:
+ update_interfaces = not self.has_wanted_interfaces(wanted, actual)
+ except KeyError as e:
+ self._module.fail_json(
+ msg="Error checking 'interfaces', missing key: %s" % e.args[0])
+
+ if update_interfaces:
+ server_info = self._update_param('interfaces', server_info)
+
+ if not self._result['changed']:
+ self._result['changed'] = server_info['interfaces'] != actual
+
+ server_info = self._update_param('flavor', server_info, requires_stop=True)
+ server_info = self._update_param('name', server_info)
+ server_info = self._update_param('tags', server_info)
+
+ if previous_state == "running":
+ server_info = self._start_stop_server(server_info, target_state="running", ignore_diff=True)
+
+ return server_info
+
+ def present_server(self):
+ server_info = self._get_server_info()
+
+ if server_info.get('state') != "absent":
+
+ # If the target state is stopped, stop before a potential update so that force is not required
+ if self._module.params.get('state') == "stopped":
+ server_info = self._start_stop_server(server_info, target_state="stopped")
+
+ server_info = self._update_server(server_info)
+
+ if self._module.params.get('state') == "running":
+ server_info = self._start_stop_server(server_info, target_state="running")
+ else:
+ server_info = self._create_server(server_info)
+ server_info = self._start_stop_server(server_info, target_state=self._module.params.get('state'))
+
+ return server_info
+
+ def absent_server(self):
+ server_info = self._get_server_info()
+ if server_info.get('state') != "absent":
+ self._result['changed'] = True
+ self._result['diff']['before'] = deepcopy(server_info)
+ self._result['diff']['after'] = self._init_server_container()
+ if not self._module.check_mode:
+ self._delete('servers/%s' % server_info['uuid'])
+ server_info = self._wait_for_state(('absent', ))
+ return server_info
+
+ def has_wanted_interfaces(self, wanted, actual):
+ """ Compares the interfaces as specified by the user, with the
+ interfaces as reported by the server.
+
+ """
+
+ if len(wanted or ()) != len(actual or ()):
+ return False
+
+ def match_interface(spec):
+
+ # First, find the interface that belongs to the spec
+ for interface in actual:
+
+ # If we have a public network, only look for the right type
+ if spec.get('network') == 'public':
+ if interface['type'] == 'public':
+ break
+
+ # If we have a private network, check the network's UUID
+ if spec.get('network') is not None:
+ if interface['type'] == 'private':
+ if interface['network']['uuid'] == spec['network']:
+ break
+
+ # If we only have an addresses block, match all subnet UUIDs
+ wanted_subnet_ids = set(
+ a['subnet'] for a in (spec.get('addresses') or ()))
+
+ actual_subnet_ids = set(
+ a['subnet']['uuid'] for a in interface['addresses'])
+
+ if wanted_subnet_ids == actual_subnet_ids:
+ break
+ else:
+ return False # looped through everything without match
+
+ # Fail if any of the addresses don't match
+ for wanted_addr in (spec.get('addresses') or ()):
+
+ # Unspecified, skip
+ if 'address' not in wanted_addr:
+ continue
+
+ addresses = set(a['address'] for a in interface['addresses'])
+ if wanted_addr['address'] not in addresses:
+ return False
+
+ # If the wanted address is an empty list, but the actual list is
+ # not, the user wants to remove automatically set addresses
+ if spec.get('addresses') == [] and interface['addresses'] != []:
+ return False
+
+ if interface['addresses'] == [] and spec.get('addresses') != []:
+ return False
+
+ return interface
+
+ for spec in wanted:
+
+ # If there is any interface that does not match, clearly not all
+ # wanted interfaces are present
+ if not match_interface(spec):
+ return False
+
+ return True
+
+ def normalize_interfaces_param(self):
+ """ Goes through the interfaces parameter and gets it ready to be
+ sent to the API. """
+
+ for spec in (self._module.params.get('interfaces') or ()):
+ if spec['addresses'] is None:
+ del spec['addresses']
+ if spec['network'] is None:
+ del spec['network']
+
+ for address in (spec.get('addresses') or ()):
+ if address['address'] is None:
+ del address['address']
+ if address['subnet'] is None:
+ del address['subnet']
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='running', choices=ALLOWED_STATES),
+ name=dict(),
+ uuid=dict(),
+ flavor=dict(),
+ image=dict(),
+ zone=dict(),
+ volume_size_gb=dict(type='int', default=10),
+ bulk_volume_size_gb=dict(type='int'),
+ ssh_keys=dict(type='list', elements='str', no_log=False),
+ password=dict(no_log=True),
+ use_public_network=dict(type='bool'),
+ use_private_network=dict(type='bool'),
+ use_ipv6=dict(type='bool', default=True),
+ interfaces=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ network=dict(type='str'),
+ addresses=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ address=dict(type='str'),
+ subnet=dict(type='str'),
+ ),
+ ),
+ ),
+ ),
+ server_groups=dict(type='list', elements='str'),
+ user_data=dict(),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(
+ ['interfaces', 'use_public_network'],
+ ['interfaces', 'use_private_network'],
+ ),
+ required_one_of=(('name', 'uuid'),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_server = AnsibleCloudscaleServer(module)
+ if module.params['state'] == "absent":
+ server = cloudscale_server.absent_server()
+ else:
+ server = cloudscale_server.present_server()
+
+ result = cloudscale_server.get_result(server)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
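has_wanted_interfaces() above compares every wanted interface spec with the interfaces reported by the API, checking the public/private type, the network UUID and the set of subnet UUIDs. The subnet-set comparison for a single spec can be isolated as in the sketch below; the sample data is illustrative and the type/network checks are omitted.

# Sketch: does the wanted set of subnet UUIDs match the interface's actual set?
def subnets_match(spec, interface):
    wanted = set(a['subnet'] for a in (spec.get('addresses') or ()))
    actual = set(a['subnet']['uuid'] for a in interface['addresses'])
    return wanted == actual

if __name__ == '__main__':
    spec = {'addresses': [{'subnet': '70d282ab-2a01-4abb-ada5-34e56a5a7eee'}]}
    interface = {
        'type': 'private',
        'addresses': [
            {'subnet': {'uuid': '70d282ab-2a01-4abb-ada5-34e56a5a7eee'},
             'address': '10.11.12.3'},
        ],
    }
    print(subnets_match(spec, interface))  # -> True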
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py
new file mode 100644
index 000000000..f4dc9c319
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: server_group
+short_description: Manages server groups on the cloudscale.ch IaaS service
+description:
+ - Create, update and remove server groups.
+author:
+ - René Moser (@resmo)
+ - Denis Krienbühl (@href)
+version_added: "1.0.0"
+options:
+ name:
+ description:
+ - Name of the server group.
+ - Either I(name) or I(uuid) is required. These options are mutually exclusive.
+ type: str
+ uuid:
+ description:
+ - UUID of the server group.
+ - Either I(name) or I(uuid) is required. These options are mutually exclusive.
+ type: str
+ type:
+ description:
+ - Type of the server group.
+ default: anti-affinity
+ type: str
+ zone:
+ description:
+ - Zone slug of the server group (e.g. C(lpg1) or C(rma1)).
+ type: str
+ state:
+ description:
+ - State of the server group.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ tags:
+ description:
+ - Tags associated with the server group. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+---
+- name: Ensure server group exists
+ cloudscale_ch.cloud.server_group:
+ name: my-name
+ type: anti-affinity
+ api_token: xxxxxx
+
+- name: Ensure server group in a specific zone
+ cloudscale_ch.cloud.server_group:
+ name: my-rma-group
+ type: anti-affinity
+ zone: lpg1
+ api_token: xxxxxx
+
+- name: Ensure a server group is absent
+ cloudscale_ch.cloud.server_group:
+ name: my-name
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+---
+href:
+ description: API URL to get details about this server group
+ returned: if available
+ type: str
+ sample: https://api.cloudscale.ch/v1/server-group/cfde831a-4e87-4a75-960f-89b0148aa2cc
+uuid:
+ description: The unique identifier for this server group
+ returned: always
+ type: str
+ sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+ description: The display name of the server group
+ returned: always
+ type: str
+ sample: load balancers
+type:
+ description: The type of the server group
+ returned: if available
+ type: str
+ sample: anti-affinity
+zone:
+ description: The zone of the server group
+ returned: success
+ type: dict
+ sample: { 'slug': 'rma1' }
+servers:
+ description: A list of servers that are part of the server group.
+ returned: if available
+ type: list
+ sample: []
+state:
+ description: State of the server group.
+ returned: always
+ type: str
+ sample: present
+tags:
+ description: Tags associated with the server group.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ type=dict(type='str', default='anti-affinity'),
+ zone=dict(type='str'),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('name', 'uuid'),),
+ required_if=(('state', 'present', ('name',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_server_group = AnsibleCloudscaleBase(
+ module,
+ resource_name='server-groups',
+ resource_create_param_keys=[
+ 'name',
+ 'type',
+ 'zone',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'tags',
+ ],
+ )
+ cloudscale_server_group.query_constraint_keys = [
+ 'zone',
+ ]
+
+ if module.params['state'] == 'absent':
+ result = cloudscale_server_group.absent()
+ else:
+ result = cloudscale_server_group.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
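Because resource_update_param_keys above only lists name and tags, those are the only attributes changed on an existing group; type and zone are create-only. A hypothetical follow-up task (the UUID is a placeholder):

  # Placeholder UUID; only name and tags are updated in place.
  - name: Rename an existing server group and tag it
    cloudscale_ch.cloud.server_group:
      uuid: cfde831a-4e87-4a75-960f-89b0148aa2cc
      name: my-renamed-group
      tags:
        project: my project
      api_token: xxxxxx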
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py
new file mode 100644
index 000000000..b5e50306b
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, René Moser <rene.moser@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: subnet
+short_description: Manages subnets on the cloudscale.ch IaaS service
+description:
+ - Create, update and remove subnets.
+author:
+ - René Moser (@resmo)
+version_added: "1.3.0"
+options:
+ uuid:
+ description:
+ - UUID of the subnet.
+ type: str
+ cidr:
+ description:
+ - The cidr of the subnet.
+ - Required if I(state=present).
+ type: str
+ network:
+ description:
+ - The name of the network the subnet is related to.
+ - Required if I(state=present).
+ type: dict
+ suboptions:
+ uuid:
+ description:
+ - The uuid of the network.
+ type: str
+ name:
+ description:
+ - The name of the network.
+ type: str
+ zone:
+ description:
+ - The zone the network is allocated in.
+ type: str
+ gateway_address:
+ description:
+ - The gateway address of the subnet. If not set, no gateway is used.
+ - Cannot be within the DHCP range, which is the lowest .101-.254 in the subnet.
+ type: str
+ dns_servers:
+ description:
+ - A list of DNS resolver IP addresses that act as DNS servers.
+ - If not set, the cloudscale.ch default resolvers are used.
+ type: list
+ elements: str
+ reset:
+ description:
+ - Resets I(gateway_address) and I(dns_servers) to their API default values.
+ - "Note: This operation is not idempotent."
+ type: bool
+ default: false
+ state:
+ description:
+ - State of the subnet.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ tags:
+ description:
+ - Tags associated with the subnet. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+---
+- name: Ensure subnet exists
+ cloudscale_ch.cloud.subnet:
+ cidr: 172.16.0.0/24
+ network:
+ uuid: 2db69ba3-1864-4608-853a-0771b6885a3a
+ api_token: xxxxxx
+
+- name: Ensure subnet exists
+ cloudscale_ch.cloud.subnet:
+ cidr: 192.168.1.0/24
+ gateway_address: 192.168.1.1
+ dns_servers:
+ - 192.168.1.10
+ - 192.168.1.11
+ network:
+ name: private
+ zone: lpg1
+ api_token: xxxxxx
+
+- name: Ensure a subnet is absent
+ cloudscale_ch.cloud.subnet:
+ cidr: 172.16.0.0/24
+ network:
+ name: private
+ zone: lpg1
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+---
+href:
+ description: API URL to get details about the subnet.
+ returned: success
+ type: str
+ sample: https://api.cloudscale.ch/v1/subnets/33333333-1864-4608-853a-0771b6885a3
+uuid:
+ description: The unique identifier for the subnet.
+ returned: success
+ type: str
+ sample: 33333333-1864-4608-853a-0771b6885a3
+cidr:
+ description: The CIDR of the subnet.
+ returned: success
+ type: str
+ sample: 172.16.0.0/24
+network:
+ description: The network object of the subnet.
+ returned: success
+ type: complex
+ contains:
+ href:
+ description: API URL to get details about the network.
+ returned: success
+ type: str
+ sample: https://api.cloudscale.ch/v1/networks/33333333-1864-4608-853a-0771b6885a3
+ uuid:
+ description: The unique identifier for the network.
+ returned: success
+ type: str
+ sample: 33333333-1864-4608-853a-0771b6885a3
+ name:
+ description: The name of the network.
+ returned: success
+ type: str
+ sample: my network
+ zone:
+ description: The zone the network is allocated in.
+ returned: success
+ type: dict
+ sample: { 'slug': 'rma1' }
+ version_added: 1.4.0
+gateway_address:
+ description: The gateway address of the subnet.
+ returned: success
+ type: str
+ sample: "192.168.42.1"
+dns_servers:
+ description: List of DNS resolver IP addresses.
+ returned: success
+ type: list
+ sample: ["9.9.9.9", "149.112.112.112"]
+state:
+ description: State of the subnet.
+ returned: success
+ type: str
+ sample: present
+tags:
+ description: Tags associated with the subnet.
+ returned: success
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+
+class AnsibleCloudscaleSubnet(AnsibleCloudscaleBase):
+
+ def __init__(self, module):
+ super(AnsibleCloudscaleSubnet, self).__init__(
+ module=module,
+ resource_name='subnets',
+ resource_key_name='cidr',
+ resource_create_param_keys=[
+ 'cidr',
+ 'gateway_address',
+ 'dns_servers',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'gateway_address',
+ 'dns_servers',
+ 'tags',
+ ],
+ )
+ self._network = None
+
+ def query_network(self, uuid=None):
+ if self._network is not None:
+ return self._network
+
+ net_param = self._module.params['network']
+ net_uuid = uuid or net_param['uuid']
+
+ if net_uuid is not None:
+ network = self._get('networks/%s' % net_uuid)
+ if not network:
+ self._module.fail_json(msg="Network with 'uuid' not found: %s" % net_uuid)
+
+ elif net_param['name'] is not None:
+ networks_found = []
+ networks = self._get('networks')
+ for network in networks or []:
+ # Skip networks in other zones
+ if net_param['zone'] is not None and network['zone']['slug'] != net_param['zone']:
+ continue
+
+ if network.get('name') == net_param['name']:
+ networks_found.append(network)
+
+ if not networks_found:
+ msg = "Network with 'name' not found: %s" % net_param['name']
+ self._module.fail_json(msg=msg)
+
+ elif len(networks_found) == 1:
+ network = networks_found[0]
+
+ # We might have found more than one network with identical name
+ else:
+ msg = ("Multiple networks with 'name' not found: %s."
+ "Add the 'zone' to distinguish or use 'uuid' argument to specify the network." % net_param['name'])
+ self._module.fail_json(msg=msg)
+
+ else:
+ self._module.fail_json(msg="Either Network UUID or name is required.")
+
+ # For consistency, take a minimal network stub, but also include zone
+ self._network = dict()
+ for k, v in network.items():
+ if k in ['name', 'uuid', 'href', 'zone']:
+ self._network[k] = v
+
+ return self._network
+
+ def create(self, resource):
+ resource['network'] = self.query_network()
+
+ data = {
+ 'network': resource['network']['uuid'],
+ }
+ return super(AnsibleCloudscaleSubnet, self).create(resource, data)
+
+ def update(self, resource):
+ # Resets to default values by the API
+ if self._module.params.get('reset'):
+ for key in ('dns_servers', 'gateway_address',):
+ # No need to reset if user set the param anyway.
+ if self._module.params.get(key) is None:
+ self._result['changed'] = True
+ patch_data = {
+ key: None
+ }
+ if not self._module.check_mode:
+ href = resource.get('href')
+ if not href:
+ self._module.fail_json(msg='Unable to update %s, no href found.' % key)
+ self._patch(href, patch_data, filter_none=False)
+
+ if not self._module.check_mode:
+ resource = self.query()
+
+ return super(AnsibleCloudscaleSubnet, self).update(resource)
+
+ def get_result(self, resource):
+ if resource and 'network' in resource:
+ resource['network'] = self.query_network(uuid=resource['network']['uuid'])
+ return super(AnsibleCloudscaleSubnet, self).get_result(resource)
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ uuid=dict(type='str'),
+ cidr=dict(type='str'),
+ network=dict(
+ type='dict',
+ options=dict(
+ uuid=dict(type='str'),
+ name=dict(type='str'),
+ zone=dict(type='str'),
+ ),
+ ),
+ gateway_address=dict(type='str'),
+ dns_servers=dict(type='list', elements='str', default=None),
+ tags=dict(type='dict'),
+ reset=dict(type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('cidr', 'uuid',),),
+ required_together=(('cidr', 'network',),),
+ required_if=(('state', 'present', ('cidr', 'network',),),),
+ supports_check_mode=True,
+ )
+
+ cloudscale_subnet = AnsibleCloudscaleSubnet(module)
+
+ if module.params['state'] == 'absent':
+ result = cloudscale_subnet.absent()
+ else:
+ result = cloudscale_subnet.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
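The reset parameter is not covered by the EXAMPLES block above. A sketch of how it might be used (values are placeholders); note that, as documented, the operation is not idempotent, so the task reports a change on every run while reset is enabled:

  # Placeholder CIDR and network; resets gateway_address and dns_servers via the API.
  - name: Reset gateway address and DNS servers to the API defaults
    cloudscale_ch.cloud.subnet:
      cidr: 192.168.1.0/24
      network:
        name: private
        zone: lpg1
      reset: true
      api_token: xxxxxx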
diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py
new file mode 100644
index 000000000..ecc6cfcc6
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# Copyright (c) 2019, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: volume
+short_description: Manages volumes on the cloudscale.ch IaaS service
+description:
+ - Create, attach/detach, update and delete volumes on the cloudscale.ch IaaS service.
+notes:
+ - To create a new volume, at least the I(name) and I(size_gb) options
+ are required.
+ - A volume can be created and attached to a server in the same task.
+author:
+ - Gaudenz Steinlin (@gaudenz)
+ - René Moser (@resmo)
+ - Denis Krienbühl (@href)
+version_added: "1.0.0"
+options:
+ state:
+ description:
+ - State of the volume.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ name:
+ description:
+ - Name of the volume. Either name or UUID must be present to change an
+ existing volume.
+ type: str
+ uuid:
+ description:
+ - UUID of the volume. Either name or UUID must be present to change an
+ existing volume.
+ type: str
+ size_gb:
+ description:
+ - Size of the volume in GB.
+ type: int
+ type:
+ description:
+ - Type of the volume. Cannot be changed after creating the volume.
+ Defaults to C(ssd) on volume creation.
+ choices: [ ssd, bulk ]
+ type: str
+ zone:
+ description:
+ - Zone in which the volume resides (e.g. C(lpg1) or C(rma1)). Cannot be
+ changed after creating the volume. Defaults to the project default zone.
+ type: str
+ servers:
+ description:
+ - UUIDs of the servers this volume is attached to. Set this to C([]) to
+ detach the volume. Currently a volume can only be attached to a
+ single server.
+ - The aliases C(server_uuids) and C(server_uuid) are deprecated and will
+ be removed in version 3.0.0 of this collection.
+ aliases: [ server_uuids, server_uuid ]
+ type: list
+ elements: str
+ tags:
+ description:
+ - Tags associated with the volume. Set this to C({}) to clear any tags.
+ type: dict
+extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
+'''
+
+EXAMPLES = '''
+# Create a new SSD volume
+- name: Create an SSD volume
+ cloudscale_ch.cloud.volume:
+ name: my_ssd_volume
+ zone: 'lpg1'
+ size_gb: 50
+ api_token: xxxxxx
+ register: my_ssd_volume
+
+# Attach an existing volume to a server
+- name: Attach volume to server
+ cloudscale_ch.cloud.volume:
+ uuid: "{{ my_ssd_volume.uuid }}"
+ servers:
+ - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ api_token: xxxxxx
+
+# Create and attach a volume to a server
+- name: Create and attach volume to server
+ cloudscale_ch.cloud.volume:
+ name: my_ssd_volume
+ zone: 'lpg1'
+ size_gb: 50
+ servers:
+ - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ api_token: xxxxxx
+
+# Detach volume from server
+- name: Detach volume from server
+ cloudscale_ch.cloud.volume:
+ uuid: "{{ my_ssd_volume.uuid }}"
+ servers: []
+ api_token: xxxxxx
+
+# Delete a volume
+- name: Delete volume
+ cloudscale_ch.cloud.volume:
+ name: my_ssd_volume
+ state: absent
+ api_token: xxxxxx
+'''
+
+RETURN = '''
+href:
+ description: The API URL to get details about this volume.
+ returned: state == present
+ type: str
+ sample: https://api.cloudscale.ch/v1/volumes/2db69ba3-1864-4608-853a-0771b6885a3a
+uuid:
+ description: The unique identifier for this volume.
+ returned: state == present
+ type: str
+ sample: 2db69ba3-1864-4608-853a-0771b6885a3a
+name:
+ description: The display name of the volume.
+ returned: state == present
+ type: str
+ sample: my_ssd_volume
+size_gb:
+ description: The size of the volume in GB.
+ returned: state == present
+ type: int
+ sample: 50
+type:
+ description: The type of the volume.
+ returned: state == present
+ type: str
+ sample: bulk
+zone:
+ description: The zone of the volume.
+ returned: state == present
+ type: dict
+ sample: {'slug': 'lpg1'}
+server_uuids:
+ description: The UUIDs of the servers this volume is attached to. This return
+ value is deprecated and will disappear in the future when the field is
+ removed from the API.
+ returned: state == present
+ type: list
+ sample: ['47cec963-fcd2-482f-bdb6-24461b2d47b1']
+servers:
+ description: The list of servers this volume is attached to.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "href": "https://api.cloudscale.ch/v1/servers/47cec963-fcd2-482f-bdb6-24461b2d47b1",
+ "name": "my_server",
+ "uuid": "47cec963-fcd2-482f-bdb6-24461b2d47b1"
+ }
+ ]
+state:
+ description: The current status of the volume.
+ returned: success
+ type: str
+ sample: present
+tags:
+ description: Tags associated with the volume.
+ returned: state == present
+ type: dict
+ sample: { 'project': 'my project' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.api import (
+ AnsibleCloudscaleBase,
+ cloudscale_argument_spec,
+)
+
+
+class AnsibleCloudscaleVolume(AnsibleCloudscaleBase):
+
+ def create(self, resource):
+ # Fail when missing params for creation
+ self._module.fail_on_missing_params(['name', 'size_gb'])
+ return super(AnsibleCloudscaleVolume, self).create(resource)
+
+ def find_difference(self, key, resource, param):
+ is_different = False
+
+ if key != 'servers':
+ return super(AnsibleCloudscaleVolume, self).find_difference(key, resource, param)
+
+ server_has = resource[key]
+ server_wanted = param
+ if len(server_wanted) != len(server_has):
+ is_different = True
+ else:
+ for has in server_has:
+ if has["uuid"] not in server_wanted:
+ is_different = True
+
+ return is_different
+
+
+def main():
+ argument_spec = cloudscale_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=('present', 'absent')),
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ zone=dict(type='str'),
+ size_gb=dict(type='int'),
+ type=dict(type='str', choices=('ssd', 'bulk')),
+ servers=dict(type='list', elements='str', aliases=['server_uuids', 'server_uuid']),
+ tags=dict(type='dict'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=(('name', 'uuid'),),
+ supports_check_mode=True,
+ )
+
+ # TODO remove in version 3.0.0
+ if module.params.get('server_uuid') or module.params.get('server_uuids'):
+ module.deprecate('The aliases "server_uuid" and "server_uuids" have '
+ 'been deprecated and will be removed, use "servers" '
+ 'instead.',
+ version='3.0.0', collection_name='cloudscale_ch.cloud')
+
+ cloudscale_volume = AnsibleCloudscaleVolume(
+ module,
+ resource_name='volumes',
+ resource_create_param_keys=[
+ 'name',
+ 'type',
+ 'zone',
+ 'size_gb',
+ 'servers',
+ 'tags',
+ ],
+ resource_update_param_keys=[
+ 'name',
+ 'size_gb',
+ 'servers',
+ 'tags',
+ ],
+ )
+
+ if module.params['state'] == 'absent':
+ result = cloudscale_volume.absent()
+ else:
+ result = cloudscale_volume.present()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
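Beyond attach and detach, the update keys above also allow resizing a volume and clearing its tags in place. A minimal sketch, assuming a volume named my_ssd_volume already exists:

  # Assumes the volume from the examples above; size_gb and tags are update keys.
  - name: Grow an existing volume and clear its tags
    cloudscale_ch.cloud.volume:
      name: my_ssd_volume
      size_gb: 100
      tags: {}
      api_token: xxxxxx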
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template b/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template
new file mode 100644
index 000000000..5fa5d5fcb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template
@@ -0,0 +1,2 @@
+[default]
+cloudscale_api_token = @API_TOKEN
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases
new file mode 100644
index 000000000..136c05e0d
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml
new file mode 100644
index 000000000..81387ff9c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# The image to use for test servers
+cloudscale_test_image: 'debian-10'
+
+# Alternate test image to use if a different image is required
+cloudscale_alt_test_image: 'ubuntu-20.04'
+
+# The flavor to use for test servers
+cloudscale_test_flavor: 'flex-4-2'
+
+# SSH key to use for test servers
+cloudscale_test_ssh_key: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
+
+# The zone to use for test servers
+cloudscale_test_zone: 'lpg1'
+cloudscale_test_alt_zone: 'rma1'
+
+# The region to use to request floating IPs
+cloudscale_test_region: 'lpg'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml
new file mode 100644
index 000000000..ff2132c54
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml
@@ -0,0 +1,24 @@
+---
+- name: List all custom images
+ uri:
+ url: 'https://api.cloudscale.ch/v1/custom-images'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: image_list
+ until: image_list is not failed
+ retries: 5
+ delay: 3
+
+- name: Remove all images created by this test run
+ cloudscale_ch.cloud.custom_image:
+ uuid: '{{ item.uuid }}'
+ state: 'absent'
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ image_list.json }}'
+ register: res
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
+ until: res is not failed
+ retries: 5
+ delay: 3
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml
new file mode 100644
index 000000000..663608597
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml
@@ -0,0 +1,19 @@
+---
+- name: List all floating IPs
+ uri:
+ url: 'https://api.cloudscale.ch/v1/floating-ips'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: floating_ip_list
+
+- name: Remove all floating IPs created by this test run
+ cloudscale_ch.cloud.floating_ip:
+ # TODO: fix
+ # ip: '{{ item.network | ipaddr("address") }}'
+ ip: '{{ item.network.split("/")[0] }}'
+ state: 'absent'
+ when: "cloudscale_resource_prefix in (item.reverse_ptr | string ) or ('ansible_name' in item.tags and cloudscale_resource_prefix in item.tags['ansible_name'])"
+ with_items: '{{ floating_ip_list.json }}'
+ loop_control:
+ label: '{{ item.reverse_ptr }} ({{ item.network }})'
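The TODO above refers to the commented-out ipaddr variant of the ip lookup. One possible fix, assuming the ansible.utils collection and the netaddr Python package are available on the controller (not validated against this test suite):

  # Assumes ansible.utils + netaddr; functionally equivalent to the split("/") workaround.
  - name: Remove all floating IPs created by this test run
    cloudscale_ch.cloud.floating_ip:
      ip: '{{ item.network | ansible.utils.ipaddr("address") }}'
      state: 'absent'
    when: cloudscale_resource_prefix in (item.reverse_ptr | string)
    with_items: '{{ floating_ip_list.json }}'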
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_load_balancers.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_load_balancers.yml
new file mode 100644
index 000000000..6f286196f
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_load_balancers.yml
@@ -0,0 +1,24 @@
+---
+- name: List all load balancers
+ uri:
+ url: 'https://api.cloudscale.ch/v1/load-balancers'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: load_balancer_list
+ until: load_balancer_list is not failed
+ retries: 5
+ delay: 3
+
+- name: Remove all load balancers created by this test run
+ cloudscale_ch.cloud.load_balancer:
+ uuid: '{{ item.uuid }}'
+ state: 'absent'
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ load_balancer_list.json }}'
+ register: res
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
+ until: res is not failed
+ retries: 5
+ delay: 3
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml
new file mode 100644
index 000000000..e02c83b1c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml
@@ -0,0 +1,17 @@
+---
+- name: List all networks
+ uri:
+ url: 'https://api.cloudscale.ch/v1/networks'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: network_list
+
+- name: Remove all networks created by this test run
+ cloudscale_ch.cloud.network:
+ uuid: '{{ item.uuid }}'
+ state: absent
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ network_list.json }}'
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml
new file mode 100644
index 000000000..2ddb17433
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml
@@ -0,0 +1,17 @@
+---
+- name: List all objects users
+ uri:
+ url: 'https://api.cloudscale.ch/v1/objects-users'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: objects_user_list
+
+- name: Remove all objects users created by this test run
+ cloudscale_ch.cloud.objects_user:
+ id: '{{ item.id }}'
+ state: absent
+ when: cloudscale_resource_prefix in item.display_name
+ with_items: '{{ objects_user_list.json }}'
+ loop_control:
+ label: '{{ item.display_name }} ({{ item.id }})'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml
new file mode 100644
index 000000000..cc85b1379
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml
@@ -0,0 +1,17 @@
+---
+- name: List all server groups
+ uri:
+ url: 'https://api.cloudscale.ch/v1/server-groups'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: server_group_list
+
+- name: Remove all server groups created by this test run
+ cloudscale_ch.cloud.server_group:
+ uuid: '{{ item.uuid }}'
+ state: absent
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ server_group_list.json }}'
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml
new file mode 100644
index 000000000..d8c3a0fa4
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml
@@ -0,0 +1,24 @@
+---
+- name: List all servers
+ uri:
+ url: 'https://api.cloudscale.ch/v1/servers'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: server_list
+ until: server_list is not failed
+ retries: 5
+ delay: 3
+
+- name: Remove all servers created by this test run
+ cloudscale_ch.cloud.server:
+ uuid: '{{ item.uuid }}'
+ state: 'absent'
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ server_list.json }}'
+ register: res
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
+ until: res is not failed
+ retries: 5
+ delay: 3
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml
new file mode 100644
index 000000000..7ce17ba3c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml
@@ -0,0 +1,17 @@
+---
+- name: List all volumes
+ uri:
+ url: 'https://api.cloudscale.ch/v1/volumes'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: volume_list
+
+- name: Remove all volumes created by this test run
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ item.uuid }}'
+ state: 'absent'
+ when: cloudscale_resource_prefix in item.name
+ with_items: '{{ volume_list.json }}'
+ loop_control:
+ label: '{{ item.name }} ({{ item.uuid }})'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml
new file mode 100644
index 000000000..fa0be6eb8
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+# Password to use for test server
+# This has to be set as a fact, otherwise a new password will be generated
+# on every variable access.
+- set_fact:
+ cloudscale_test_password: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}"
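The comment above describes a lazy-evaluation gotcha: a password lookup defined under vars is re-evaluated on every access, while set_fact freezes the value once. A contrived illustration of the problem the fact avoids (not part of the test suite); the two debug tasks below would print two different passwords:

  # Illustration only: lazy vars re-run the lookup on each access.
  - vars:
      lazy_password: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}"
    block:
      - debug:
          msg: "first access: {{ lazy_password }}"
      - debug:
          msg: "second access: {{ lazy_password }}"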
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml
new file mode 100644
index 000000000..630ca4f51
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+image_url: https://at-images.objects.lpg.cloudscale.ch/alpine
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml
new file mode 100644
index 000000000..a2f4ff52b
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- block:
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_custom_images
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml
new file mode 100644
index 000000000..c82de757d
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml
@@ -0,0 +1,415 @@
+---
+- name: Delete a non existent custom image by uuid
+ cloudscale_ch.cloud.custom_image:
+ uuid: '415caea5-da7c-4aaa-aaaa-ececd38fb8ea'
+ state: absent
+ register: delete
+- name: Verify Delete a non existent custom image by uuid
+ assert:
+ that:
+ - delete is not changed
+
+- name: Fail delete a non existent custom image by name
+ cloudscale_ch.cloud.custom_image:
+ name: this-image-is-non-existent
+ state: absent
+ register: delete
+- name: Verify Fail delete a non existent custom image by name
+ assert:
+ that:
+ - delete is not changed
+
+- name: Fail import a custom image without url
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ state: present
+ slug: custom-ansible-image
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: failed_import
+ ignore_errors: true
+- name: Verify Fail import a custom image without url
+ assert:
+ that:
+ - failed_import is failed
+ - failed_import.msg.startswith('Cannot import a new image without url.')
+
+- name: Import a custom image and wait for import
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: image1
+ retries: 15
+ delay: 5
+ until: image1.import_status == 'success'
+ failed_when: image1.import_status == 'failed'
+- name: Verify import a custom image and wait for import
+ assert:
+ that:
+ - image1.import_status == 'success'
+ - image1.name == "{{ cloudscale_resource_prefix }}-test-image"
+
+- name: Import a custom image (idempotency)
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: image
+- name: Verify import a custom image (idempotency)
+ assert:
+ that:
+ - image is not changed
+ - image.name == "{{ cloudscale_resource_prefix }}-test-image"
+ - image.uuid == image1.uuid
+
+- name: Import a custom image with bad url
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image2"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}-non-existent"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: failed_import
+ ignore_errors: true
+ retries: 15
+ delay: 5
+ until: failed_import.import_status == 'failed'
+ failed_when: failed_import.import_status == 'failed'
+- name: Verify Fail import a custom image with bad url
+ assert:
+ that:
+ - failed_import is failed
+ - failed_import.error_message.startswith('Expected HTTP 200, got HTTP 40')
+
+- name: Import a custom image with bad url (idempotency)
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image2"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}-non-existent"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: failed_import_idempotency
+ ignore_errors: true
+ retries: 15
+ delay: 5
+ until: failed_import_idempotency.import_status == 'failed'
+ failed_when: failed_import_idempotency.import_status == 'failed'
+- name: Verify Fail import a custom image with bad url (idempotency)
+ assert:
+ that:
+ - failed_import_idempotency is failed
+ - failed_import_idempotency.error_message.startswith('Expected HTTP 200, got HTTP 40')
+ - failed_import.uuid == failed_import_idempotency.uuid
+
+# This task should not loop like the ones above because each invocation in the
+# loop would create a new import due to the "force_retry" parameter. It just
+# checks that a new import is created; everything else is already covered
+# above.
+- name: Import a custom image with bad url (retry)
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image2"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}-non-existent"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ force_retry: true
+ register: failed_import_retry
+- name: Verify Fail import a custom image with bad url (retry)
+ assert:
+ that:
+ - failed_import.uuid != failed_import_retry.uuid
+
+- name: Import a custom image and a failed import with that name exists
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image2"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: image2
+- name: Verify import a custom image
+ assert:
+ that:
+ - image2 is changed
+ - image2.name == "{{ cloudscale_resource_prefix }}-test-image2"
+ - image2.uuid != image1.uuid
+
+- name: Wait for import
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ retries: 15
+ delay: 5
+ register: import_status
+ until: import_status.import_status == 'success'
+ failed_when: import_status.import_status == 'failed'
+- name: Verify Wait for import
+ assert:
+ that:
+ - import_status is not changed
+ - import_status.name == "{{ cloudscale_resource_prefix }}-test-image2"
+
+- name: Get image by name
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image2"
+ register: image_by_name
+- name: Verify get image by name
+ assert:
+ that:
+ - image_by_name is not changed
+ - image_by_name.uuid == image2.uuid
+
+- name: Change the name of an image
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image-with-a-new-name"
+ uuid: "{{ image2.uuid }}"
+ register: change_name
+- name: Verify Change the name of an image
+ assert:
+ that:
+ - change_name.name == "{{ cloudscale_resource_prefix }}-test-image-with-a-new-name"
+ - change_name is changed
+
+- name: Update slug of a custom image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ slug: ansible-image-slug
+ register: image
+- name: Verify update slug of a custom image
+ assert:
+ that:
+ - image is changed
+ - image.slug == 'ansible-image-slug'
+
+- name: Get custom image with updated slug
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ register: image
+- name: Verify update slug of a custom image
+ assert:
+ that:
+ - image.slug == "ansible-image-slug"
+
+- name: Update tags of a custom image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ tags:
+ project: luna
+ register: image
+- name: Verify update tags of a custom image
+ assert:
+ that:
+ - image is changed
+ - image.tags == "project: luna"
+
+- name: Update user_data_handling of a custom image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ user_data_handling: 'extend-cloud-config'
+ register: image
+- name: Verify update user_data_handling of a custom image
+ assert:
+ that:
+ - image is changed
+ - image.user_data_handling == 'extend-cloud-config'
+
+- name: Get custom image with updated user_data_handling
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ register: image
+- name: Verify update user_data_handling of a custom image
+ assert:
+ that:
+ - image.user_data_handling == "extend-cloud-config"
+
+- name: Update slug, tags and user_data_handling of a custom image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.uuid }}"
+ slug: yet-another-slug
+ tags:
+ project: jupiter
+ user_data_handling: 'pass-through'
+ register: image
+- name: Verify update slug, tags and user_data_handling of a custom image
+ assert:
+ that:
+ - image is changed
+ - image.slug == "yet-another-slug"
+ - image.tags.project == 'jupiter'
+ - image.user_data_handling == "pass-through"
+
+- name: List all custom images, there should be two
+ uri:
+ url: 'https://api.cloudscale.ch/v1/custom-images'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: image_list
+- name: Verify that two custom images are created by this test run
+ assert:
+ that:
+ - image_list.json | selectattr("name","search", "{{ cloudscale_resource_prefix }}" ) | list | length == 2
+
+- name: Delete image by uuid
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image1.uuid }}"
+ state: absent
+ register: image1
+- name: Verify delete image by uuid
+ assert:
+ that:
+ - image1 is changed
+ - image1.state == 'absent'
+
+- name: Delete image by name
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ change_name.name }}"
+ state: absent
+ register: image2
+- name: Verify delete image by name
+ assert:
+ that:
+ - image2 is changed
+ - image2.state == 'absent'
+- name: Check if all images got deleted
+ uri:
+ url: 'https://api.cloudscale.ch/v1/custom-images'
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ status_code: 200
+ register: image_list
+- name: Verify that the two custom images of this test run are deleted
+ assert:
+ that:
+ - image_list.json | selectattr("name","search", "{{ cloudscale_resource_prefix }}" ) | list | length == 0
+
+- name: Import a custom image
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ state: present
+ slug: custom-ansible-image
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ tags:
+ project: mars
+ source_format: raw
+ register: image1
+
+- name: Import a custom image for the second time (uri module)
+ uri:
+ url: 'https://api.cloudscale.ch/v1/custom-images/import'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ url: "{{ image_url }}"
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ slug: 'custom-ansible-image'
+ zones:
+ - 'rma1'
+ user_data_handling: 'pass-through'
+ tags:
+ project: 'mars'
+ source_format: 'raw'
+ body_format: json
+ status_code: 201
+ register: image2
+
+- name: Wait for import of first image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image1.uuid }}"
+ retries: 15
+ delay: 5
+ register: import_status
+ until: import_status.import_status == 'success'
+ failed_when: import_status.import_status == 'failed'
+
+- name: Wait for import of second image
+ cloudscale_ch.cloud.custom_image:
+ uuid: "{{ image2.json.uuid }}"
+ retries: 15
+ delay: 5
+ register: import_status
+ until: import_status.import_status == 'success'
+ failed_when: import_status.import_status == 'failed'
+
+- name: Fail get image by name when two exist
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image"
+ register: failed_image_by_name
+ ignore_errors: true
+- name: Verify Fail get image by name when two exist
+ assert:
+ that:
+ - failed_image_by_name is failed
+ - failed_image_by_name.msg.startswith("More than one custom-images resource with 'name' exists")
+
+- name: Import a custom image with firmware type uefi and wait for import
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ cloudscale_resource_prefix }}-test-image-uefi"
+ state: present
+ slug: custom-ansible-image-uefi
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ firmware_type: 'uefi'
+ source_format: raw
+ register: image1_uefi
+ retries: 15
+ delay: 5
+ until: image1_uefi.import_status == 'success'
+ failed_when: image1_uefi.import_status == 'failed'
+- name: Verify import a custom image with firmware type uefi and wait for import
+ assert:
+ that:
+ - image1_uefi.import_status == 'success'
+ - image1_uefi.name == "{{ cloudscale_resource_prefix }}-test-image-uefi"
+
+- name: Fail changing the firmware type of an already imported image from uefi to bios
+ cloudscale_ch.cloud.custom_image:
+ name: "{{ image1_uefi.name }}"
+ state: present
+ slug: "{{ image1_uefi.slug }}"
+ url: "{{ image_url }}"
+ zones: lpg1
+ user_data_handling: 'pass-through'
+ firmware_type: 'bios'
+ source_format: raw
+ register: fail_firmware_change
+ ignore_errors: true
+- name: Verify firmware type change failed
+ assert:
+ that:
+ - fail_firmware_change is failed
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml
new file mode 100644
index 000000000..d58d19b61
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml
@@ -0,0 +1,158 @@
+- name: Request regional floating IP in check mode
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-floating-ip'
+ server: '{{ test01.uuid }}'
+ ip_version: '{{ item.ip_version }}'
+ reverse_ptr: '{{ item.reverse_ptr | default(omit) }}'
+ prefix_length: '{{ item.prefix_length | default(omit) }}'
+ region: '{{ cloudscale_test_region }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ check_mode: true
+ register: floating_ip
+- name: Verify request floating IP in check mode
+ assert:
+ that:
+ - floating_ip is changed
+ - floating_ip.state == 'absent'
+
+- name: Request regional floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-floating-ip'
+ server: '{{ test01.uuid }}'
+ ip_version: '{{ item.ip_version }}'
+ reverse_ptr: '{{ item.reverse_ptr | default(omit) }}'
+ prefix_length: '{{ item.prefix_length | default(omit) }}'
+ region: '{{ cloudscale_test_region }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: floating_ip
+- name: Verify request regional floating IP
+ assert:
+ that:
+ - floating_ip is changed
+ - floating_ip.region.slug == cloudscale_test_region
+ # - (item.ip_version == 4 and floating_ip.ip | ipv4) or (item.ip_version == 6 and floating_ip.ip | ipv6)
+ - floating_ip.server == test01.uuid
+ - floating_ip.tags.project == 'ansible-test'
+ - floating_ip.tags.stage == 'production'
+ - floating_ip.tags.sla == '24-7'
+
+- name: Request regional floating IP idempotence
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-floating-ip'
+ server: '{{ test01.uuid }}'
+ ip_version: '{{ item.ip_version }}'
+ reverse_ptr: '{{ item.reverse_ptr | default(omit) }}'
+ prefix_length: '{{ item.prefix_length | default(omit) }}'
+ region: '{{ cloudscale_test_region }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: floating_ip_idempotence
+- name: Verify request regional floating IP idempotence
+ assert:
+ that:
+ - floating_ip_idempotence is not changed
+ - floating_ip_idempotence.server == test01.uuid
+ - floating_ip.region.slug == cloudscale_test_region
+ - floating_ip.tags.project == 'ansible-test'
+ - floating_ip.tags.stage == 'production'
+ - floating_ip.tags.sla == '24-7'
+
+- name: Request regional floating IP different IP version in check mode
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-floating-ip'
+ ip_version: '{{ 6 if item.ip_version == 4 else 4 }}'
+ reverse_ptr: '{{ item.reverse_ptr | default(omit) }}'
+ prefix_length: '{{ item.prefix_length | default(omit) }}'
+ region: '{{ cloudscale_test_region }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: floating_ip_version_differ
+ check_mode: true
+- name: Verify Request regional floating IP different IP version in check mode
+ assert:
+ that:
+ - floating_ip_version_differ is changed
+ - not floating_ip_version_differ.network
+
+- name: Request global floating IP
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-global-floating-ip'
+ ip_version: '{{ item.ip_version }}'
+ reverse_ptr: '{{ item.reverse_ptr | default(omit) }}'
+ prefix_length: '{{ item.prefix_length | default(omit) }}'
+ type: 'global'
+ register: global_floating_ip
+- name: Verify global floating IP
+ assert:
+ that:
+ - global_floating_ip is changed
+ - global_floating_ip.region == None
+ - global_floating_ip.type == 'global'
+ # - (item.ip_version == 4 and global_floating_ip.ip | ipv4) or (item.ip_version == 6 and global_floating_ip.ip | ipv6)
+ - not global_floating_ip.server
+
+- name: Remove global floating IP
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ global_floating_ip.ip }}'
+ state: 'absent'
+ register: global_floating_ip
+- name: Verify release of global floating IP
+ assert:
+ that:
+ - global_floating_ip is changed
+ - global_floating_ip.state == 'absent'
+
+- name: Move floating IP to second server
+ cloudscale_ch.cloud.floating_ip:
+ server: '{{ test02.uuid }}'
+ ip: '{{ floating_ip.ip }}'
+ register: move_ip
+- name: Verify move floating IPv4 to second server
+ assert:
+ that:
+ - move_ip is changed
+ - move_ip.server == test02.uuid
+
+- name: Remove floating IP in check mode
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ floating_ip.ip }}'
+ state: 'absent'
+ register: release_ip
+ check_mode: true
+- name: Verify Remove floating IP in check mode
+ assert:
+ that:
+ - release_ip is changed
+ - release_ip.state == 'present'
+
+- name: Remove floating IP
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ floating_ip.ip }}'
+ state: 'absent'
+ register: release_ip
+- name: Verify Remove floating IP
+ assert:
+ that:
+ - release_ip is changed
+ - release_ip.state == 'absent'
+
+- name: Remove floating IP idempotence
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ floating_ip.ip }}'
+ state: 'absent'
+ register: release_ip
+- name: Verify Remove floating IP idempotence
+ assert:
+ that:
+ - release_ip is not changed
+ - release_ip.state == 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml
new file mode 100644
index 000000000..a7046ae96
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+- name: Cloudscale floating IP tests
+ block:
+ - name: Create a server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test01'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ zone: '{{ cloudscale_test_zone }}'
+ register: test01
+
+ - name: Create a second server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test02'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ zone: '{{ cloudscale_test_zone }}'
+ register: test02
+
+ - include_tasks: floating_ip.yml
+ loop:
+ - { 'ip_version': 4, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-4.example.com' }
+ - { 'ip_version': 6, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-6.example.com' }
+ - { 'ip_version': 6, 'prefix_length': 56 }
+
+ - import_tasks: unassigned.yml
+
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_servers
+ - import_role:
+ name: common
+ tasks_from: cleanup_floating_ips
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml
new file mode 100644
index 000000000..4ee4ed080
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml
@@ -0,0 +1,27 @@
+---
+- name: Assign Floating IP to server test01
+ cloudscale_ch.cloud.floating_ip:
+ name: '{{ cloudscale_resource_prefix }}-unassigned'
+ ip_version: 6
+ server: '{{ test01.uuid }}'
+ reverse_ptr: '{{ cloudscale_resource_prefix }}-unassigned.example.com'
+ region: '{{ cloudscale_test_region }}'
+ register: floating_ip
+
+# The only way to have an unassigned floating IP is to delete the server
+# where the floating IP is currently assigned.
+- name: Delete server test01
+ cloudscale_ch.cloud.server:
+ uuid: '{{ test01.uuid }}'
+ state: 'absent'
+
+- name: Do not fail if floating IP is unassigned
+ cloudscale_ch.cloud.floating_ip:
+ ip: '{{ floating_ip.ip }}'
+ register: floating_ip_not_fail
+- name: Verify do not fail if floating IP is unassigned
+ assert:
+ that:
+ - floating_ip_not_fail is successful
+ - floating_ip_not_fail is not changed
+ - floating_ip_not_fail.server == None
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/defaults/main.yml
new file mode 100644
index 000000000..9be93a784
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+cloudscale_test_flavor_lb: lb-standard
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/failures.yml
new file mode 100644
index 000000000..7bad4747f
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/failures.yml
@@ -0,0 +1,67 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.load_balancer:
+ register: load_balancer
+ ignore_errors: True
+- name: Verify fail missing params
+ assert:
+ that:
+ - load_balancer is failed
+
+- name: Fail create a running load balancer with non-existing flavor
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}-non-existent'
+ zone: '{{ cloudscale_test_zone }}'
+ register: load_balancer
+ ignore_errors: True
+- name: Verify fail create a load balancer with non-existing flavor
+ assert:
+ that:
+ - load_balancer is failed
+ - '"is not a valid choice" in load_balancer.fetch_url_info.body'
+
+- name: Fail create a running load balancer with non-existing zone
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ zone: '{{ cloudscale_test_zone }}-non-existent'
+ register: load_balancer
+ ignore_errors: True
+- name: Verify fail create a load balancer with non-existing zone
+ assert:
+ that:
+ - load_balancer is failed
+ - '"is not a valid choice. Choose one of" in load_balancer.fetch_url_info.body'
+
+- name: Fail create a running load balancer with non-existing VIP UUID
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '00000-00000-00000-00000'
+ zone: '{{ cloudscale_test_zone }}'
+ register: load_balancer
+ ignore_errors: True
+- debug: var=load_balancer
+- name: Verify fail create a load balancer with non-existing VIP UUID
+ assert:
+ that:
+ - load_balancer is failed
+ - '"Must be a valid UUID." in load_balancer.fetch_url_info.body'
+
+- name: Fail create a running load balancer with incorrect VIP address
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '{{ snet.uuid }}'
+ address: '172.16.0.0'
+ zone: '{{ cloudscale_test_zone }}'
+ register: load_balancer
+ ignore_errors: True
+- name: Verify fail create a load balancer with incorrect VIP address
+ assert:
+ that:
+ - load_balancer is failed
+ - '"is not a valid IP for the specified subnet" in load_balancer.fetch_url_info.body'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/main.yml
new file mode 100644
index 000000000..26cfacda2
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_load_balancers.yml
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks.yml
+
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/setup.yml
new file mode 100644
index 000000000..ce39e2506
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/setup.yml
@@ -0,0 +1,19 @@
+---
+- name: Ensure network exists
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-lb-network'
+ zone: '{{ cloudscale_test_zone }}'
+ auto_create_ipv4_subnet: false
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: net
+
+- name: Ensure subnet exists
+ cloudscale_ch.cloud.subnet:
+ cidr: 172.16.0.0/24
+ network:
+ uuid: '{{ net.uuid }}'
+ zone: '{{ cloudscale_test_zone }}'
+ register: snet
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/tests.yml
new file mode 100644
index 000000000..f654f99f0
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer/tasks/tests.yml
@@ -0,0 +1,188 @@
+---
+# Create LB
+- name: Test create a running load balancer in check mode
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '{{ snet.uuid }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+ check_mode: yes
+- name: Verify create a load balancer in check mode
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.state == 'absent'
+
+- name: Test create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '{{ snet.uuid }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+- name: Verify create a running load balancer
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.status == 'running'
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer.flavor.slug == '{{ cloudscale_test_flavor_lb }}'
+ - load_balancer.vip_addresses[0].subnet.uuid == '{{ snet.uuid }}'
+ - load_balancer.zone.slug == '{{ cloudscale_test_zone }}'
+ - load_balancer.tags.project == 'ansible-test'
+ - load_balancer.tags.stage == 'production'
+ - load_balancer.tags.sla == '24-7'
+
+- name: Test create a running load balancer idempotence
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '{{ snet.uuid }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+- name: Verify create a running load balancer idempotence
+ assert:
+ that:
+ - load_balancer is not changed
+ - load_balancer.status == 'running'
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer.flavor.slug == '{{ cloudscale_test_flavor_lb }}'
+ - load_balancer.vip_addresses[0].subnet.uuid == '{{ snet.uuid }}'
+ - load_balancer.zone.slug == '{{ cloudscale_test_zone }}'
+ - load_balancer.tags.project == 'ansible-test'
+ - load_balancer.tags.stage == 'production'
+ - load_balancer.tags.sla == '24-7'
+
+- name: Test create a running load balancer with default flavor
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test2'
+ zone: '{{ cloudscale_test_zone }}'
+ register: load_balancer_default_flavor
+- name: Verify create a running load balancer with default flavor
+ assert:
+ that:
+ - load_balancer_default_flavor is changed
+ - load_balancer_default_flavor.status == 'running'
+ - load_balancer_default_flavor.name == '{{ cloudscale_resource_prefix }}-test2'
+ - load_balancer_default_flavor.flavor.slug == '{{ cloudscale_test_flavor_lb }}'
+ - load_balancer_default_flavor.zone.slug == '{{ cloudscale_test_zone }}'
+
+# Get LB facts
+- name: Test get facts of a load balancer by name
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ load_balancer.name }}'
+ register: load_balancer
+- name: Verify get a load balancer
+ assert:
+ that:
+ - load_balancer is not changed
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test'
+
+# Update running LB
+- name: Test update name and tags of a running load balancer in check mode
+ cloudscale_ch.cloud.load_balancer:
+ uuid: '{{ load_balancer.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer
+ check_mode: yes
+- name: Verify update name and tags of a running load balancer in check mode
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.status == 'running'
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test update name and tags of a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ uuid: '{{ load_balancer.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer
+- name: Verify update name and tags of a running load balancer
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.status == 'running'
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer.flavor.slug == '{{ cloudscale_test_flavor_lb }}'
+ - load_balancer.tags.project == 'ansible-test'
+ - load_balancer.tags.stage == 'staging'
+ - load_balancer.tags.sla == '8-5'
+
+- name: Test update name of a running load balancer idempotence
+ cloudscale_ch.cloud.load_balancer:
+ uuid: '{{ load_balancer.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer
+- name: Verify update name of a running load balancer idempotence
+ assert:
+ that:
+ - load_balancer is not changed
+ - load_balancer.status == 'running'
+ - load_balancer.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer.flavor.slug == '{{ cloudscale_test_flavor_lb }}'
+ - load_balancer.tags.project == 'ansible-test'
+ - load_balancer.tags.stage == 'staging'
+ - load_balancer.tags.sla == '8-5'
+
+# Delete LB
+- name: Test running load balancer deletion by name in check mode
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer
+ check_mode: yes
+- name: Verify running load balancer deletion by name in check mode
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.status == 'running'
+
+- name: Test running load balancer deletion by name
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer
+- name: Verify running load balancer deletion by name
+ assert:
+ that:
+ - load_balancer is changed
+ - load_balancer.state == 'absent'
+
+- name: Test running load balancer deletion by name idempotence
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer
+- name: Verify running load balancer deletion by name idempotence
+ assert:
+ that:
+ - load_balancer is not changed
+ - load_balancer.state == 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/defaults/main.yml
new file mode 100644
index 000000000..4e3722295
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+cloudscale_test_flavor_lb: lb-standard
+cloudscale_test_algorithm_lb_pool: round_robin
+cloudscale_test_protocol_lb_pool: tcp
+
+cloudscale_test_delay_lb_monitor: 2
+cloudscale_test_timeout_lb_monitor: 1
+cloudscale_test_up_threshold_lb_monitor: 3
+cloudscale_test_down_threshold_lb_monitor: 4
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/failures.yml
new file mode 100644
index 000000000..b52270398
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/failures.yml
@@ -0,0 +1,60 @@
+---
+- name: Fail create a load balancer health monitor with missing params
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ register: load_balancer_health_monitor
+ ignore_errors: True
+- name: Verify fail create a load balancer health monitor with missing params
+ assert:
+ that:
+ - load_balancer_health_monitor is failed
+ - '"This field is required" in load_balancer_health_monitor.fetch_url_info.body'
+
+- name: Fail create a load balancer health monitor with missing type
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ register: load_balancer_health_monitor
+ ignore_errors: True
+- name: Verify fail create a load balancer health monitor with missing type
+ assert:
+ that:
+ - load_balancer_health_monitor is failed
+ - '"This field is required" in load_balancer_health_monitor.fetch_url_info.body'
+
+- name: Fail create a load balancer health monitor with non-existing load balancer pool
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '15264769-ac69-4809-a8e4-4d73f8f92496'
+ register: load_balancer_health_monitor
+ ignore_errors: True
+- name: Verify fail create a load balancer health monitor with non-existing load balancer pool
+ assert:
+ that:
+ - load_balancer_health_monitor is failed
+ - '"This field is required" in load_balancer_health_monitor.fetch_url_info.body'
+
+- name: Fail create a load balancer health monitor with invalid version/host combination
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ delay_s: '{{ cloudscale_test_delay_lb_monitor }}'
+ timeout_s: '{{ cloudscale_test_timeout_lb_monitor }}'
+ up_threshold: '{{ cloudscale_test_up_threshold_lb_monitor }}'
+ down_threshold: '{{ cloudscale_test_down_threshold_lb_monitor }}'
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'GET'
+ url_path: '/'
+ version: '1.0'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+ ignore_errors: True
+- name: Verify fail create a load balancer health monitor with invalid version/host combination
+ assert:
+ that:
+ - load_balancer_health_monitor is failed
+ - '"The host argument can not be used with HTTP version 1.0" in load_balancer_health_monitor.fetch_url_info.body'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/main.yml
new file mode 100644
index 000000000..5623b6cab
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_load_balancers.yml
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/setup.yml
new file mode 100644
index 000000000..76d0b5abe
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/setup.yml
@@ -0,0 +1,35 @@
+---
+- name: Test create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+
+- name: Test create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
+
+- name: Test create a 2nd load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test-ping'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_ping
\ No newline at end of file
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/tests.yml
new file mode 100644
index 000000000..ab68cbdfa
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_health_monitor/tasks/tests.yml
@@ -0,0 +1,412 @@
+---
+# Create LB health monitor
+- name: Test create a load balancer health monitor (PING) in check mode
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool_ping.uuid }}'
+ type: 'ping'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+ check_mode: yes
+- name: Verify create a load balancer health monitor (PING) in check mode
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'absent'
+
+- name: Test create a load balancer health monitor (PING)
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool_ping.uuid }}'
+ type: 'ping'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify create a load balancer health monitor (PING)
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool_ping.uuid }}'
+ - load_balancer_health_monitor.type == 'ping'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test create a load balancer health monitor (PING) idempotence
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool_ping.uuid }}'
+ type: 'ping'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify create a load balancer health monitor (PING) idempotence
+ assert:
+ that:
+ - load_balancer_health_monitor is not changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool_ping.uuid }}'
+ - load_balancer_health_monitor.type == 'ping'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test create a load balancer health monitor (HTTP) in check mode
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ delay_s: '{{ cloudscale_test_delay_lb_monitor }}'
+ timeout_s: '{{ cloudscale_test_timeout_lb_monitor }}'
+ up_threshold: '{{ cloudscale_test_up_threshold_lb_monitor }}'
+ down_threshold: '{{ cloudscale_test_down_threshold_lb_monitor }}'
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'GET'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+ check_mode: yes
+- name: Verify create a load balancer health monitor (HTTP) in check mode
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'absent'
+
+- name: Test create a load balancer health monitor (HTTP)
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ delay_s: '{{ cloudscale_test_delay_lb_monitor }}'
+ timeout_s: '{{ cloudscale_test_timeout_lb_monitor }}'
+ up_threshold: '{{ cloudscale_test_up_threshold_lb_monitor }}'
+ down_threshold: '{{ cloudscale_test_down_threshold_lb_monitor }}'
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'GET'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify create a load balancer health monitor (HTTP)
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == {{ cloudscale_test_delay_lb_monitor }}
+ - load_balancer_health_monitor.timeout_s == {{ cloudscale_test_timeout_lb_monitor }}
+ - load_balancer_health_monitor.up_threshold == {{ cloudscale_test_up_threshold_lb_monitor }}
+ - load_balancer_health_monitor.down_threshold == {{ cloudscale_test_down_threshold_lb_monitor }}
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'GET'
+ - load_balancer_health_monitor.http.url_path == '/'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test create a load balancer health monitor (HTTP) idempotence
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ pool: '{{ load_balancer_pool.uuid }}'
+ delay_s: '{{ cloudscale_test_delay_lb_monitor }}'
+ timeout_s: '{{ cloudscale_test_timeout_lb_monitor }}'
+ up_threshold: '{{ cloudscale_test_up_threshold_lb_monitor }}'
+ down_threshold: '{{ cloudscale_test_down_threshold_lb_monitor }}'
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'GET'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify create a load balancer health monitor (HTTP) idempotence
+ assert:
+ that:
+ - load_balancer_health_monitor is not changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == {{ cloudscale_test_delay_lb_monitor }}
+ - load_balancer_health_monitor.timeout_s == {{ cloudscale_test_timeout_lb_monitor }}
+ - load_balancer_health_monitor.up_threshold == {{ cloudscale_test_up_threshold_lb_monitor }}
+ - load_balancer_health_monitor.down_threshold == {{ cloudscale_test_down_threshold_lb_monitor }}
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'GET'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+# Get LB health monitor facts
+- name: Test get facts of a load balancer health monitor by UUID
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ register: load_balancer_health_monitor_facts
+- name: Verify get a load balancer health monitor by UUID
+ assert:
+ that:
+ - load_balancer_health_monitor_facts is not changed
+
+# Update LB health monitor
+- name: Test update timeouts for a load balancer health monitor in check mode
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+ check_mode: yes
+- name: Verify update timeouts for a load balancer health monitor in check mode
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == {{ cloudscale_test_delay_lb_monitor }}
+ - load_balancer_health_monitor.timeout_s == {{ cloudscale_test_timeout_lb_monitor }}
+ - load_balancer_health_monitor.up_threshold == {{ cloudscale_test_up_threshold_lb_monitor }}
+ - load_balancer_health_monitor.down_threshold == {{ cloudscale_test_down_threshold_lb_monitor }}
+
+- name: Test update timeouts for a load balancer health monitor
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify update timeouts for a load balancer health monitor
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == 2
+ - load_balancer_health_monitor.timeout_s == 1
+ - load_balancer_health_monitor.up_threshold == 2
+ - load_balancer_health_monitor.down_threshold == 3
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'GET'
+ - load_balancer_health_monitor.http.url_path == '/'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test update timeouts for a load balancer health monitor idempotence
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify update timeouts for a load balancer health monitor idempotence
+ assert:
+ that:
+ - load_balancer_health_monitor is not changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == 2
+ - load_balancer_health_monitor.timeout_s == 1
+ - load_balancer_health_monitor.up_threshold == 2
+ - load_balancer_health_monitor.down_threshold == 3
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'GET'
+ - load_balancer_health_monitor.http.url_path == '/'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test update HTTP method of a load balancer health monitor in check mode
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ type: 'http'
+ http:
+ method: 'CONNECT'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+ check_mode: yes
+- name: Verify update HTTP method of a load balancer health monitor in check mode
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.http.method == 'GET'
+
+- name: Test update HTTP method of a load balancer health monitor
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'CONNECT'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify update HTTP method of a load balancer health monitor
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == 2
+ - load_balancer_health_monitor.timeout_s == 1
+ - load_balancer_health_monitor.up_threshold == 2
+ - load_balancer_health_monitor.down_threshold == 3
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'CONNECT'
+ - load_balancer_health_monitor.http.url_path == '/'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+- name: Test update HTTP method of a load balancer health monitor idempotence
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ delay_s: 2
+ timeout_s: 1
+ up_threshold: 2
+ down_threshold: 3
+ type: 'http'
+ http:
+ expected_codes:
+ - 200
+ - 202
+ method: 'CONNECT'
+ url_path: '/'
+ version: '1.1'
+ host: 'host1'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_health_monitor
+- name: Verify update HTTP method of a load balancer health monitor idempotence
+ assert:
+ that:
+ - load_balancer_health_monitor is not changed
+ - load_balancer_health_monitor.state == 'present'
+ - load_balancer_health_monitor.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_health_monitor.delay_s == 2
+ - load_balancer_health_monitor.timeout_s == 1
+ - load_balancer_health_monitor.up_threshold == 2
+ - load_balancer_health_monitor.down_threshold == 3
+ - load_balancer_health_monitor.type == 'http'
+ - '"200" in load_balancer_health_monitor.http.expected_codes'
+ - '"202" in load_balancer_health_monitor.http.expected_codes'
+ - load_balancer_health_monitor.http.method == 'CONNECT'
+ - load_balancer_health_monitor.http.url_path == '/'
+ - load_balancer_health_monitor.http.version == '1.1'
+ - load_balancer_health_monitor.http.host == 'host1'
+ - load_balancer_health_monitor.tags.project == 'ansible-test'
+ - load_balancer_health_monitor.tags.stage == 'production'
+ - load_balancer_health_monitor.tags.sla == '24-7'
+
+# Delete LB health monitor
+- name: Test load balancer health monitor deletion by UUID in check mode
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ state: absent
+ register: load_balancer_health_monitor
+ check_mode: yes
+- name: Verify load balancer health monitor deletion by UUID in check mode
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.uuid == '{{ load_balancer_health_monitor.uuid }}'
+
+- name: Test load balancer health monitor deletion by UUID
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ state: absent
+ register: load_balancer_health_monitor
+- name: Verify load balancer health monitor deletion by UUID
+ assert:
+ that:
+ - load_balancer_health_monitor is changed
+ - load_balancer_health_monitor.state == 'absent'
+
+- name: Test load balancer health monitor deletion by UUID idempotence
+ cloudscale_ch.cloud.load_balancer_health_monitor:
+ uuid: '{{ load_balancer_health_monitor.uuid }}'
+ state: absent
+ register: load_balancer_health_monitor
+- name: Verify load balancer health monitor deletion by UUID idempotence
+ assert:
+ that:
+ - load_balancer_health_monitor is not changed
+ - load_balancer_health_monitor.state == 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/defaults/main.yml
new file mode 100644
index 000000000..ca4d3079c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+cloudscale_test_flavor_lb: lb-standard
+cloudscale_test_algorithm_lb_pool: round_robin
+cloudscale_test_protocol_lb_pool: tcp
+
+cloudscale_test_listener_protocol: tcp
+cloudscale_test_protocol_port: 8080
+cloudscale_test_allowed_cidr_v4: 192.168.3.0/24
+cloudscale_test_allowed_cidr_v6: 2001:db8:85a3:8d3::/64
+cloudscale_test_timeout_client_data_ms: 40000
+cloudscale_test_timeout_member_connect_ms: 4000
+cloudscale_test_timeout_member_data_ms: 40000
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/failures.yml
new file mode 100644
index 000000000..a4a2110ad
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/failures.yml
@@ -0,0 +1,69 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.load_balancer_listener:
+ register: load_balancer_listener
+ ignore_errors: True
+- name: Verify fail missing params
+ assert:
+ that:
+ - load_balancer_listener is failed
+
+- name: Fail create a load balancer listener with non-existing load balancer pool
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '15264769-ac69-4809-a8e4-4d73f8f92496'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ register: load_balancer_listener
+ ignore_errors: True
+- name: Verify fail create a load balancer listener with non-existing load balancer pool
+ assert:
+ that:
+ - load_balancer_listener is failed
+ - '"does not exist" in load_balancer_listener.fetch_url_info.body'
+
+- name: Fail create a load balancer listener with non-existing protocol
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: 'pct'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ register: load_balancer_listener
+ ignore_errors: True
+- name: Verify fail create a load balancer listener with non-existing protocol
+ assert:
+ that:
+ - load_balancer_listener is failed
+ - '"Expect one of" in load_balancer_listener.fetch_url_info.body'
+
+- name: Fail create a load balancer listener with invalid port
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: 11111111
+ register: load_balancer_listener
+ ignore_errors: True
+- debug: var=load_balancer_listener
+- name: Verify fail create a load balancer listener with invalid port
+ assert:
+ that:
+ - load_balancer_listener is failed
+ - '"The protocol_port must be between" in load_balancer_listener.fetch_url_info.body'
+
+- name: Fail create a load balancer listener with invalid cidr
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '1.1.1.1'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ register: load_balancer_listener
+ ignore_errors: True
+- name: Verify fail create a load balancer listener with invalid cidr
+ assert:
+ that:
+ - load_balancer_listener is failed
+ - '"Enter a valid network address in CIDR notation" in load_balancer_listener.fetch_url_info.body'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/main.yml
new file mode 100644
index 000000000..5623b6cab
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_load_balancers.yml
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/setup.yml
new file mode 100644
index 000000000..7d98f66e6
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/setup.yml
@@ -0,0 +1,23 @@
+---
+- name: Test create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+
+- name: Test create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/tests.yml
new file mode 100644
index 000000000..54f110b0d
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_listener/tasks/tests.yml
@@ -0,0 +1,248 @@
+---
+# Create LB listener
+- name: Test create a load balancer listener in check mode
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '{{ cloudscale_test_allowed_cidr_v4 }}'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: '{{ cloudscale_test_timeout_client_data_ms }}'
+ timeout_member_connect_ms: '{{ cloudscale_test_timeout_member_connect_ms }}'
+ timeout_member_data_ms: '{{ cloudscale_test_timeout_member_data_ms }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_listener
+ check_mode: yes
+- name: Verify create a load balancer listener in check mode
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.state == 'absent'
+
+- name: Test create a load balancer listener
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '{{ cloudscale_test_allowed_cidr_v4 }}'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: '{{ cloudscale_test_timeout_client_data_ms }}'
+ timeout_member_connect_ms: '{{ cloudscale_test_timeout_member_connect_ms }}'
+ timeout_member_data_ms: '{{ cloudscale_test_timeout_member_data_ms }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_listener
+- name: Verify create a load balancer listener
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_listener.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_listener.protocol == '{{ cloudscale_test_listener_protocol }}'
+ - load_balancer_listener.protocol_port == {{ cloudscale_test_protocol_port }}
+ - "'{{ cloudscale_test_allowed_cidr_v4 }}' in load_balancer_listener.allowed_cidrs"
+ - "'{{ cloudscale_test_allowed_cidr_v6 }}' in load_balancer_listener.allowed_cidrs"
+ - load_balancer_listener.timeout_client_data_ms == {{ cloudscale_test_timeout_client_data_ms }}
+ - load_balancer_listener.timeout_member_connect_ms == {{ cloudscale_test_timeout_member_connect_ms }}
+ - load_balancer_listener.timeout_member_data_ms == {{ cloudscale_test_timeout_member_data_ms }}
+ - load_balancer_listener.tags.project == 'ansible-test'
+ - load_balancer_listener.tags.stage == 'production'
+ - load_balancer_listener.tags.sla == '24-7'
+
+- name: Test create a load balancer listener idempotence
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '{{ cloudscale_test_allowed_cidr_v4 }}'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: '{{ cloudscale_test_timeout_client_data_ms }}'
+ timeout_member_connect_ms: '{{ cloudscale_test_timeout_member_connect_ms }}'
+ timeout_member_data_ms: '{{ cloudscale_test_timeout_member_data_ms }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_listener
+- name: Verify create a load balancer listener idempotence
+ assert:
+ that:
+ - load_balancer_listener is not changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_listener.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_listener.protocol == '{{ cloudscale_test_listener_protocol }}'
+ - load_balancer_listener.protocol_port == {{ cloudscale_test_protocol_port }}
+ - "'{{ cloudscale_test_allowed_cidr_v4 }}' in load_balancer_listener.allowed_cidrs"
+ - "'{{ cloudscale_test_allowed_cidr_v6 }}' in load_balancer_listener.allowed_cidrs"
+ - load_balancer_listener.timeout_client_data_ms == {{ cloudscale_test_timeout_client_data_ms }}
+ - load_balancer_listener.timeout_member_connect_ms == {{ cloudscale_test_timeout_member_connect_ms }}
+ - load_balancer_listener.timeout_member_data_ms == {{ cloudscale_test_timeout_member_data_ms }}
+ - load_balancer_listener.tags.project == 'ansible-test'
+ - load_balancer_listener.tags.stage == 'production'
+ - load_balancer_listener.tags.sla == '24-7'
+
+# Get LB listener facts
+- name: Test get facts of a load balancer listener by name
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ register: load_balancer_listener
+- name: Verify get a load balancer listener by name
+ assert:
+ that:
+ - load_balancer_listener is not changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test'
+
+# Update an existing LB listener
+- name: Test update a load balancer listener in check mode
+ cloudscale_ch.cloud.load_balancer_listener:
+ uuid: '{{ load_balancer_listener.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '192.168.1.0/24'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: 40001
+ timeout_member_connect_ms: 4001
+ timeout_member_data_ms: 40001
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_listener
+ check_mode: yes
+- name: Verify update a load balancer listener in check mode
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_listener.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_listener.protocol == '{{ cloudscale_test_listener_protocol }}'
+ - load_balancer_listener.protocol_port == {{ cloudscale_test_protocol_port }}
+ - "'{{ cloudscale_test_allowed_cidr_v4 }}' in load_balancer_listener.allowed_cidrs"
+ - "'{{ cloudscale_test_allowed_cidr_v6 }}' in load_balancer_listener.allowed_cidrs"
+ - load_balancer_listener.timeout_client_data_ms == {{ cloudscale_test_timeout_client_data_ms }}
+ - load_balancer_listener.timeout_member_connect_ms == {{ cloudscale_test_timeout_member_connect_ms }}
+ - load_balancer_listener.timeout_member_data_ms == {{ cloudscale_test_timeout_member_data_ms }}
+ - load_balancer_listener.tags.project == 'ansible-test'
+ - load_balancer_listener.tags.stage == 'production'
+ - load_balancer_listener.tags.sla == '24-7'
+
+- name: Test update a load balancer listener
+ cloudscale_ch.cloud.load_balancer_listener:
+ uuid: '{{ load_balancer_listener.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '192.168.1.0/24'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: 40001
+ timeout_member_connect_ms: 4001
+ timeout_member_data_ms: 40001
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_listener
+- name: Verify update a load balancer listener
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_listener.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_listener.protocol == '{{ cloudscale_test_listener_protocol }}'
+ - load_balancer_listener.protocol_port == {{ cloudscale_test_protocol_port }}
+ - "'{{ cloudscale_test_allowed_cidr_v4 }}' not in load_balancer_listener.allowed_cidrs"
+ - "'192.168.1.0/24' in load_balancer_listener.allowed_cidrs"
+ - "'{{ cloudscale_test_allowed_cidr_v6 }}' in load_balancer_listener.allowed_cidrs"
+ - load_balancer_listener.timeout_client_data_ms == 40001
+ - load_balancer_listener.timeout_member_connect_ms == 4001
+ - load_balancer_listener.timeout_member_data_ms == 40001
+ - load_balancer_listener.tags.project == 'ansible-test'
+ - load_balancer_listener.tags.stage == 'staging'
+ - load_balancer_listener.tags.sla == '8-5'
+
+- name: Test update a load balancer listener idempotence
+ cloudscale_ch.cloud.load_balancer_listener:
+ uuid: '{{ load_balancer_listener.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ pool: '{{ load_balancer_pool.uuid }}'
+ protocol: '{{ cloudscale_test_listener_protocol }}'
+ protocol_port: '{{ cloudscale_test_protocol_port }}'
+ allowed_cidrs:
+ - '192.168.1.0/24'
+ - '{{ cloudscale_test_allowed_cidr_v6 }}'
+ timeout_client_data_ms: 40001
+ timeout_member_connect_ms: 4001
+ timeout_member_data_ms: 40001
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_listener
+- name: Verify update a load balancer listener idempotence
+ assert:
+ that:
+ - load_balancer_listener is not changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_listener.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_listener.protocol == '{{ cloudscale_test_listener_protocol }}'
+ - load_balancer_listener.protocol_port == {{ cloudscale_test_protocol_port }}
+ - "'{{ cloudscale_test_allowed_cidr_v4 }}' not in load_balancer_listener.allowed_cidrs"
+ - "'192.168.1.0/24' in load_balancer_listener.allowed_cidrs"
+ - "'{{ cloudscale_test_allowed_cidr_v6 }}' in load_balancer_listener.allowed_cidrs"
+ - load_balancer_listener.timeout_client_data_ms == 40001
+ - load_balancer_listener.timeout_member_connect_ms == 4001
+ - load_balancer_listener.timeout_member_data_ms == 40001
+ - load_balancer_listener.tags.project == 'ansible-test'
+ - load_balancer_listener.tags.stage == 'staging'
+ - load_balancer_listener.tags.sla == '8-5'
+
+# Delete LB listener
+- name: Test load balancer listener deletion by name in check mode
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_listener
+ check_mode: yes
+- name: Verify load balancer listener deletion by name in check mode
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test load balancer listener deletion by name
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_listener
+- name: Verify load balancer listener deletion by name
+ assert:
+ that:
+ - load_balancer_listener is changed
+ - load_balancer_listener.state == 'absent'
+
+- name: Test load balancer listener deletion by name idempotence
+ cloudscale_ch.cloud.load_balancer_listener:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_listener
+- name: Verify load balancer listener deletion by name idempotence
+ assert:
+ that:
+ - load_balancer_listener is not changed
+ - load_balancer_listener.state == 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/defaults/main.yml
new file mode 100644
index 000000000..90a4898eb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+cloudscale_test_flavor_lb: lb-standard
+cloudscale_test_algorithm_lb_pool: round_robin
+cloudscale_test_protocol_lb_pool: tcp
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/failures.yml
new file mode 100644
index 000000000..67f9cd2a3
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/failures.yml
@@ -0,0 +1,51 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.load_balancer_pool:
+ register: load_balancer_pool
+ ignore_errors: True
+- name: Verify fail missing params
+ assert:
+ that:
+ - load_balancer_pool is failed
+
+- name: Fail create a load balancer pool with non-existing load balancer
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '15264769-ac69-4809-a8e4-4d73f8f92496'
+ algorithm: 'round_robin'
+ protocol: 'tcp'
+ register: load_balancer_pool
+ ignore_errors: True
+- name: Verify fail create a load balancer pool with non-existing load balancer
+ assert:
+ that:
+ - load_balancer_pool is failed
+ - '"does not exist" in load_balancer_pool.fetch_url_info.body'
+
+- name: Fail create a load balancer pool with non-existing algorithm
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: 'robin_round'
+ protocol: 'tcp'
+ register: load_balancer_pool
+ ignore_errors: True
+- name: Verify fail create a load balancer pool with non-existing algorithm
+ assert:
+ that:
+ - load_balancer_pool is failed
+ - '"Expect one of" in load_balancer_pool.fetch_url_info.body'
+
+- name: Fail create a load balancer pool with non-existing protocol
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: 'round_robin'
+ protocol: 'pct'
+ register: load_balancer_pool
+ ignore_errors: True
+- name: Verify fail create a load balancer pool with non-existing protocol
+ assert:
+ that:
+ - load_balancer_pool is failed
+ - '"Expect one of" in load_balancer_pool.fetch_url_info.body'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/main.yml
new file mode 100644
index 000000000..c76b5ede4
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_load_balancers.yml
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks.yml
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/setup.yml
new file mode 100644
index 000000000..266131eba
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/setup.yml
@@ -0,0 +1,11 @@
+---
+- name: Test create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/tests.yml
new file mode 100644
index 000000000..aaac4a0f7
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool/tasks/tests.yml
@@ -0,0 +1,160 @@
+---
+# Create LB Pool
+- name: Test create a load balancer pool in check mode
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
+ check_mode: yes
+- name: Verify create a load balancer pool in check mode
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.state == 'absent'
+
+- name: Test create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
+- name: Verify create a load balancer pool
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool.load_balancer.uuid == '{{ load_balancer.uuid }}'
+ - load_balancer_pool.tags.project == 'ansible-test'
+ - load_balancer_pool.tags.stage == 'production'
+ - load_balancer_pool.tags.sla == '24-7'
+
+- name: Test create a load balancer pool idempotence
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
+- name: Verify create a load balancer pool idempotence
+ assert:
+ that:
+ - load_balancer_pool is not changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool.load_balancer.uuid == '{{ load_balancer.uuid }}'
+ - load_balancer_pool.tags.project == 'ansible-test'
+ - load_balancer_pool.tags.stage == 'production'
+ - load_balancer_pool.tags.sla == '24-7'
+
+# Get LB Pool facts
+- name: Test get facts of a load balancer pool by name
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ register: load_balancer_pool
+- name: Verify get a load balancer pool by name
+ assert:
+ that:
+ - load_balancer_pool is not changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test'
+
+# Update LB Pool
+- name: Test update name and tags of a running load balancer pool in check mode
+ cloudscale_ch.cloud.load_balancer_pool:
+ uuid: '{{ load_balancer_pool.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_pool
+ check_mode: yes
+- name: Verify update name and tags of a running load balancer pool in check mode
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test update name and tags of a running load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ uuid: '{{ load_balancer_pool.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_pool
+- name: Verify update name and tags of a running load balancer pool
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_pool.tags.project == 'ansible-test'
+ - load_balancer_pool.tags.stage == 'staging'
+ - load_balancer_pool.tags.sla == '8-5'
+
+- name: Test update name of a load balancer pool idempotence
+ cloudscale_ch.cloud.load_balancer_pool:
+ uuid: '{{ load_balancer_pool.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: load_balancer_pool
+- name: Verify update name of a load balancer pool idempotence
+ assert:
+ that:
+ - load_balancer_pool is not changed
+ - load_balancer_pool.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_pool.tags.project == 'ansible-test'
+ - load_balancer_pool.tags.stage == 'staging'
+ - load_balancer_pool.tags.sla == '8-5'
+
+# Delete LB Pool
+- name: Test load balancer pool deletion by name in check mode
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_pool
+ check_mode: yes
+- name: Verify load balancer pool deletion by name in check mode
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.state == 'present'
+
+- name: Test load balancer pool deletion by name
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_pool
+- name: Verify load balancer pool deletion by name
+ assert:
+ that:
+ - load_balancer_pool is changed
+ - load_balancer_pool.state == 'absent'
+
+- name: Test load balancer pool deletion by name idempotence
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: absent
+ register: load_balancer_pool
+- name: Verify load balancer pool deletion by name idempotence
+ assert:
+ that:
+ - load_balancer_pool is not changed
+ - load_balancer_pool.state == 'absent'
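As a companion to the lifecycle tested above, a minimal standalone playbook driving cloudscale_ch.cloud.load_balancer_pool could look roughly like this (a sketch only: the load balancer UUID is a placeholder, and API credentials are assumed to be supplied via the collection's usual API parameters or environment):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Ensure a TCP round-robin pool exists on an existing load balancer
      cloudscale_ch.cloud.load_balancer_pool:
        name: web-pool
        load_balancer: 00000000-0000-0000-0000-000000000000   # placeholder load balancer UUID
        algorithm: round_robin
        protocol: tcp
        tags:
          project: example
      register: pool

    - name: Remove the pool again
      cloudscale_ch.cloud.load_balancer_pool:
        uuid: '{{ pool.uuid }}'
        state: absent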
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/defaults/main.yml
new file mode 100644
index 000000000..6ac1a39d3
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+cloudscale_test_flavor_lb: lb-standard
+cloudscale_test_algorithm_lb_pool: round_robin
+cloudscale_test_protocol_lb_pool: tcp
+
+cloudscale_test_enabled_lb_member: true
+cloudscale_test_protocol_port_lb_member: 8080
+cloudscale_test_monitor_port_lb_member: 8181
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/failures.yml
new file mode 100644
index 000000000..c7e0d3b29
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/failures.yml
@@ -0,0 +1,34 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ register: load_balancer_pool_member
+ ignore_errors: True
+- name: 'VERIFY: Fail name and UUID'
+ assert:
+ that:
+ - load_balancer_pool_member is failed
+
+- name: Fail create a load balancer pool member with invalid enabled value
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: 'enableme'
+ register: load_balancer_pool_member
+ ignore_errors: True
+- name: Verify fail create a load balancer pool member with invalid enabled value
+ assert:
+ that:
+ - load_balancer_pool_member is failed
+ - '"is not a valid boolean" in load_balancer_pool_member.msg'
+
+- name: Fail create a load balancer pool member with non-existing load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '15264769-ac69-4809-a8e4-4d73f8f92496'
+ register: load_balancer_pool_member
+ ignore_errors: True
+- name: Verify fail create a load balancer pool member with non-existing load balancer pool
+ assert:
+ that:
+ - load_balancer_pool_member is failed
+ - '"does not exist" in load_balancer_pool_member.msg'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/main.yml
new file mode 100644
index 000000000..c76b5ede4
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_load_balancers.yml
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks.yml
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/setup.yml
new file mode 100644
index 000000000..9478149b1
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/setup.yml
@@ -0,0 +1,43 @@
+---
+- name: Ensure network exists
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-lb-network'
+ auto_create_ipv4_subnet: false
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: net
+
+- name: Ensure subnet exists
+ cloudscale_ch.cloud.subnet:
+ cidr: 172.16.0.0/24
+ network:
+ uuid: '{{ net.uuid }}'
+ register: snet
+
+- name: Test create a running load balancer
+ cloudscale_ch.cloud.load_balancer:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_lb }}'
+ vip_addresses:
+ - subnet: '{{ snet.uuid }}'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer
+
+- name: Test create a load balancer pool
+ cloudscale_ch.cloud.load_balancer_pool:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer: '{{ load_balancer.uuid }}'
+ algorithm: '{{ cloudscale_test_algorithm_lb_pool }}'
+ protocol: '{{ cloudscale_test_protocol_lb_pool }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/tests.yml
new file mode 100644
index 000000000..eab3bca84
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/load_balancer_pool_member/tasks/tests.yml
@@ -0,0 +1,306 @@
+---
+# Create LB pool member
+- name: Test create a load balancer pool member in check mode
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: '{{ cloudscale_test_enabled_lb_member }}'
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+ check_mode: yes
+- name: Verify create a load balancer pool member in check mode
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.state == 'absent'
+
+- name: Test create a load balancer pool member
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: '{{ cloudscale_test_enabled_lb_member }}'
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify create a load balancer pool member
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == {{ cloudscale_test_enabled_lb_member }}
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test create a load balancer pool member idempotence
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: '{{ cloudscale_test_enabled_lb_member }}'
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify create a load balancer pool member idempotence
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == {{ cloudscale_test_enabled_lb_member }}
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+# Get LB pool member facts
+- name: Test get facts of a load balancer pool member by name
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ register: load_balancer_pool_member
+- name: Verify get a load balancer pool member by name
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test get facts of a load balancer pool member by UUID
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ uuid: '{{ load_balancer_pool_member.uuid }}'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ register: load_balancer_pool_member
+- name: Verify get a load balancer pool member by UUID
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+
+# Update LB pool member
+- name: Test update disable a load balancer pool member in check mode
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+ check_mode: yes
+- name: Verify update disable a load balancer pool member in check mode
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == {{ cloudscale_test_enabled_lb_member }}
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test update disable a load balancer pool member
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify update disable a load balancer pool member
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == false
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test update disable a load balancer pool member idempotence
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify update disable a load balancer pool member idempotence
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == false
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test update name of a load balancer pool member in check mode
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ uuid: '{{ load_balancer_pool_member.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+ check_mode: yes
+- debug: var=load_balancer_pool_member
+- name: Verify update name of a load balancer pool member in check mode
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == false
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test update name of a load balancer pool member
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ uuid: '{{ load_balancer_pool_member.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify update name of a load balancer pool member
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == false
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+- name: Test update name of a load balancer pool member idempotence
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ uuid: '{{ load_balancer_pool_member.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ enabled: false
+ protocol_port: '{{ cloudscale_test_protocol_port_lb_member }}'
+ monitor_port: '{{ cloudscale_test_monitor_port_lb_member }}'
+ subnet: '{{ snet.uuid }}'
+ address: '172.16.0.100'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: load_balancer_pool_member
+- name: Verify update name of a load balancer pool member idempotence
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+ - load_balancer_pool_member.pool.uuid == '{{ load_balancer_pool.uuid }}'
+ - load_balancer_pool_member.enabled == false
+ - load_balancer_pool_member.protocol_port == {{ cloudscale_test_protocol_port_lb_member }}
+ - load_balancer_pool_member.monitor_port == {{ cloudscale_test_monitor_port_lb_member }}
+ - load_balancer_pool_member.tags.project == 'ansible-test'
+ - load_balancer_pool_member.tags.stage == 'production'
+ - load_balancer_pool_member.tags.sla == '24-7'
+
+# Delete LB pool member
+- name: Test load balancer pool member deletion by name in check mode
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ state: absent
+ register: load_balancer_pool_member
+ check_mode: yes
+- name: Verify load balancer pool member deletion by name in check mode
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.state == 'present'
+
+- name: Test load balancer pool member deletion by name
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ state: absent
+ register: load_balancer_pool_member
+- name: Verify load balancer pool member deletion by name
+ assert:
+ that:
+ - load_balancer_pool_member is changed
+ - load_balancer_pool_member.state == 'absent'
+
+- name: Test load balancer pool member deletion by name idempotence
+ cloudscale_ch.cloud.load_balancer_pool_member:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ load_balancer_pool: '{{ load_balancer_pool.uuid }}'
+ state: absent
+ register: load_balancer_pool_member
+- name: Verify load balancer pool member deletion by name idempotence
+ assert:
+ that:
+ - load_balancer_pool_member is not changed
+ - load_balancer_pool_member.state == 'absent'
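Outside the test harness, the module surface covered by tests.yml above can be exercised with a minimal standalone playbook along these lines (a sketch only: the member name, pool UUID and subnet UUID are placeholders, and API credentials are assumed to be configured in the usual way):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Ensure a backend member is present in the pool
      cloudscale_ch.cloud.load_balancer_pool_member:
        name: web-backend-1                                       # placeholder member name
        load_balancer_pool: 00000000-0000-0000-0000-000000000000  # placeholder pool UUID
        enabled: true
        protocol_port: 8080
        monitor_port: 8181
        subnet: 00000000-0000-0000-0000-000000000000              # placeholder subnet UUID
        address: 172.16.0.100
        tags:
          project: example
      register: member

    - name: Remove the member again
      cloudscale_ch.cloud.load_balancer_pool_member:
        name: web-backend-1
        load_balancer_pool: 00000000-0000-0000-0000-000000000000
        state: absent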
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml
new file mode 100644
index 000000000..57efebd5c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml
@@ -0,0 +1,44 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.network:
+ register: net
+ ignore_errors: True
+- name: 'VERIFY: Fail name and UUID'
+ assert:
+ that:
+ - net is failed
+
+- name: Create two networks with the same name
+ uri:
+ url: 'https://api.cloudscale.ch/v1/networks'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try to access duplicate name
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ register: net
+ ignore_errors: True
+- name: 'VERIFY: Try to access duplicate name'
+ assert:
+ that:
+ - net is failed
+ - net.msg.startswith("More than one networks resource with 'name' exists")
+
+- name: Fail network creation with UUID
+ cloudscale_ch.cloud.network:
+ uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ register: net
+ ignore_errors: True
+- name: 'VERIFY: Fail network creation with UUID'
+ assert:
+ that:
+ - net is failed
+ - net.msg.startswith("state is present but all of the following are missing")
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml
new file mode 100644
index 000000000..a89bd1401
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- block:
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml
new file mode 100644
index 000000000..926a452d0
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml
@@ -0,0 +1,159 @@
+---
+- name: Create network in check mode
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: net
+ check_mode: yes
+- name: 'VERIFY: Create network in check mode'
+ assert:
+ that:
+ - net is changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net'
+ - not net.uuid
+
+- name: Create network
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: net
+- name: 'VERIFY: Create network'
+ assert:
+ that:
+ - net is changed
+ - net.mtu == 9000
+ - net.name == '{{ cloudscale_resource_prefix }}-net'
+ - net.zone.slug == cloudscale_test_zone
+ - net.uuid
+ - net.tags.project == 'ansible-test'
+ - net.tags.stage == 'production'
+ - net.tags.sla == '24-7'
+
+- name: Remember uuid
+ set_fact:
+ network_uuid: '{{ net.uuid }}'
+
+- name: Create network idempotence
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: net
+- name: 'VERIFY: Create network idempotence'
+ assert:
+ that:
+ - net is not changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net'
+ - net.zone.slug == cloudscale_test_zone
+ - net.uuid == network_uuid
+ - net.tags.project == 'ansible-test'
+ - net.tags.stage == 'production'
+ - net.tags.sla == '24-7'
+
+- name: Update network in check mode
+ cloudscale_ch.cloud.network:
+ uuid: '{{ network_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: net
+ check_mode: yes
+- name: 'VERIFY: Update network in check mode'
+ assert:
+ that:
+ - net is changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net'
+ - net.uuid == network_uuid
+ - net.zone.slug == cloudscale_test_zone
+ - net.tags.project == 'ansible-test'
+ - net.tags.stage == 'production'
+ - net.tags.sla == '24-7'
+
+- name: Update network
+ cloudscale_ch.cloud.network:
+ uuid: '{{ network_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: net
+- name: 'VERIFY: Update network'
+ assert:
+ that:
+ - net is changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net2'
+ - net.uuid == network_uuid
+ - net.zone.slug == cloudscale_test_zone
+ - net.tags.project == 'ansible-test'
+ - net.tags.stage == 'staging'
+ - net.tags.sla == '8-5'
+
+- name: Update network idempotence
+ cloudscale_ch.cloud.network:
+ uuid: '{{ network_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: net
+- name: 'VERIFY: Update network idempotence'
+ assert:
+ that:
+ - net is not changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net2'
+ - net.uuid == network_uuid
+ - net.zone.slug == cloudscale_test_zone
+ - net.tags.project == 'ansible-test'
+ - net.tags.stage == 'staging'
+ - net.tags.sla == '8-5'
+
+- name: Delete network in check mode
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ state: absent
+ register: net
+ check_mode: yes
+- name: 'VERIFY: Delete network in check mode'
+ assert:
+ that:
+ - net is changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net2'
+ - net.uuid == network_uuid
+
+- name: Delete network
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ state: absent
+ register: net
+- name: 'VERIFY: Delete network'
+ assert:
+ that:
+ - net is changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net2'
+ - net.uuid == network_uuid
+
+- name: Delete network idempotence
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net2'
+ state: absent
+ register: net
+- name: 'VERIFY: Delete network idempotence'
+ assert:
+ that:
+ - net is not changed
+ - net.name == '{{ cloudscale_resource_prefix }}-net2'
+ - not net.uuid
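For comparison with the module calls tested above, a minimal playbook that creates a network and deletes it again might look roughly like this (a sketch; the zone slug is an assumption, since the tests take it from cloudscale_test_zone):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Ensure a private network exists
      cloudscale_ch.cloud.network:
        name: example-net
        zone: lpg1            # assumed zone slug, adjust to your account
        tags:
          project: example
      register: net

    - name: Remove the network again
      cloudscale_ch.cloud.network:
        uuid: '{{ net.uuid }}'
        state: absent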
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml
new file mode 100644
index 000000000..42d22d0da
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml
@@ -0,0 +1,44 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.objects_user:
+ register: obj
+ ignore_errors: True
+- name: 'VERIFY: Fail name and UUID'
+ assert:
+ that:
+ - obj is failed
+
+- name: Create two objects users with the same display_name
+ uri:
+ url: 'https://api.cloudscale.ch/v1/objects-users'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ display_name: '{{ cloudscale_resource_prefix }}-duplicate'
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try to access duplicate display_name
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-duplicate'
+ register: obj
+ ignore_errors: True
+- name: 'VERIFY: Try to access duplicate display_name'
+ assert:
+ that:
+ - obj is failed
+ - obj.msg.startswith("More than one objects-users resource with 'display_name' exists")
+
+- name: Fail creation with ID
+ cloudscale_ch.cloud.objects_user:
+ id: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ register: obj
+ ignore_errors: True
+- name: 'VERIFY: Fail objects user creation with ID'
+ assert:
+ that:
+ - obj is failed
+ - obj.msg.startswith("state is present but all of the following are missing")
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml
new file mode 100644
index 000000000..69171378d
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- block:
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_objects_users
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml
new file mode 100644
index 000000000..b77921aad
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml
@@ -0,0 +1,151 @@
+---
+- name: Create objects user in check mode
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: obj
+ check_mode: yes
+- name: 'VERIFY: Create objects user in check mode'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj'
+ - not obj.id
+
+- name: Create objects user
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: obj
+- name: 'VERIFY: Create objects user'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj'
+ - obj.id
+ - obj.tags.project == 'ansible-test'
+ - obj.tags.stage == 'production'
+ - obj.tags.sla == '24-7'
+
+- name: Remember uuid
+ set_fact:
+ objects_user_id: '{{ obj.id }}'
+
+- name: Create objects user idempotence
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: obj
+- name: 'VERIFY: Create objects user idempotence'
+ assert:
+ that:
+ - obj is not changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj'
+ - obj.id == objects_user_id
+ - obj.tags.project == 'ansible-test'
+ - obj.tags.stage == 'production'
+ - obj.tags.sla == '24-7'
+
+- name: Update objects user in check mode
+ cloudscale_ch.cloud.objects_user:
+ id: '{{ objects_user_id }}'
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: obj
+ check_mode: yes
+- name: 'VERIFY: Update objects user in check mode'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj'
+ - obj.id == objects_user_id
+ - obj.tags.project == 'ansible-test'
+ - obj.tags.stage == 'production'
+ - obj.tags.sla == '24-7'
+
+- name: Update objects user
+ cloudscale_ch.cloud.objects_user:
+ id: '{{ objects_user_id }}'
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: obj
+- name: 'VERIFY: Update objects user'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2'
+ - obj.id == objects_user_id
+ - obj.tags.project == 'ansible-test'
+ - obj.tags.stage == 'staging'
+ - obj.tags.sla == '8-5'
+
+- name: Update objects user idempotence
+ cloudscale_ch.cloud.objects_user:
+ id: '{{ objects_user_id }}'
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: obj
+- name: 'VERIFY: Update objects user idempotence'
+ assert:
+ that:
+ - obj is not changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2'
+ - obj.id == objects_user_id
+ - obj.tags.project == 'ansible-test'
+ - obj.tags.stage == 'staging'
+ - obj.tags.sla == '8-5'
+
+- name: Delete objects user in check mode
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ state: absent
+ register: obj
+ check_mode: yes
+- name: 'VERIFY: Delete objects user in check mode'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2'
+ - obj.id == objects_user_id
+
+- name: Delete objects user
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ state: absent
+ register: obj
+- name: 'VERIFY: Delete objects user'
+ assert:
+ that:
+ - obj is changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2'
+ - obj.id == objects_user_id
+
+- name: Delete objects user idempotence
+ cloudscale_ch.cloud.objects_user:
+ display_name: '{{ cloudscale_resource_prefix }}-obj2'
+ state: absent
+ register: obj
+- name: 'VERIFY: Delete objects user idempotence'
+ assert:
+ that:
+ - obj is not changed
+ - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2'
+ - not obj.id
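The objects_user lifecycle tested above follows the same pattern; a minimal standalone equivalent might be (a sketch: the display name is a placeholder and credentials are assumed to be configured as usual):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Ensure an objects user exists
      cloudscale_ch.cloud.objects_user:
        display_name: example-objects-user
        tags:
          project: example
      register: obj

    - name: Remove the objects user again
      cloudscale_ch.cloud.objects_user:
        id: '{{ obj.id }}'
        state: absent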
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml
new file mode 100644
index 000000000..a1ba98be1
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+cloudscale_test_flavor_2: flex-4-4
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml
new file mode 100644
index 000000000..8d9ebee04
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml
@@ -0,0 +1,53 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.server:
+ register: srv
+ ignore_errors: True
+- name: 'VERIFY: Fail name and UUID'
+ assert:
+ that:
+ - srv is failed
+
+- name: Fail non-existing server group
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-group'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ password: '{{ cloudscale_test_password }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-unexist-group'
+ ignore_errors: True
+ register: srv
+- name: 'VERIFY: Fail non-existing server group'
+ assert:
+ that:
+ - srv is failed
+ - srv.msg.startswith('Server group name or UUID not found')
+
+- name: Create two server groups with the same name
+ uri:
+ url: https://api.cloudscale.ch/v1/server-groups
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ type: anti-affinity
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try to use server groups with identical name
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-group'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ password: '{{ cloudscale_test_password }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-duplicate'
+ ignore_errors: True
+ register: srv
+- name: 'VERIFY: Try to use server groups with identical name'
+ assert:
+ that:
+ - srv is failed
+ - srv.msg.startswith('More than one server group with name exists')
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml
new file mode 100644
index 000000000..9f7641bd4
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- block:
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_servers
+ - import_role:
+ name: common
+ tasks_from: cleanup_server_groups
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml
new file mode 100644
index 000000000..7d9c33bd5
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml
@@ -0,0 +1,991 @@
+---
+- name: Setup server groups
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-group-{{ item }}'
+ type: anti-affinity
+ zone: '{{ cloudscale_test_zone }}'
+ with_sequence: count=2
+
+- name: Test create a running server in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: server
+ check_mode: yes
+- name: Verify create a running server in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'absent'
+
+- name: Test create a running server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: server
+- name: Verify create a running server
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.zone.slug == '{{ cloudscale_test_zone }}'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'production'
+ - server.tags.sla == '24-7'
+
+- name: Test create a running server idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: server
+- name: Verify create a running server idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.zone.slug == '{{ cloudscale_test_zone }}'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'production'
+ - server.tags.sla == '24-7'
+
+- name: Test update tags in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: server
+ check_mode: yes
+- name: Verify update tags in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'production'
+ - server.tags.sla == '24-7'
+
+- name: Test update tags
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: server
+- name: Verify update tags
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'staging'
+ - server.tags.sla == '8-5'
+
+- name: Test update tags idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: server
+- name: Verify update tags idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'staging'
+ - server.tags.sla == '8-5'
+
+- name: Test omit tags idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ register: server
+- name: Verify omit tags idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - server.tags.project == 'ansible-test'
+ - server.tags.stage == 'staging'
+ - server.tags.sla == '8-5'
+
+- name: Test delete tags
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ tags: {}
+ register: server
+- name: Verify delete tags
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - not server.tags
+
+- name: Test delete tags idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ tags: {}
+ register: server
+- name: Verify delete tags idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+ - not server.tags
+
+- name: Test update flavor of a running server without force in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+ check_mode: yes
+- name: Verify update flavor of a running server without force in check mode
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test update flavor of a running server without force
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+- name: Verify update flavor of a running server without force
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test update flavor of a running server without force idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+- name: Verify update flavor of a running server without force idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test update flavor and name of a running server without force in check mode
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+ check_mode: yes
+- name: Verify update flavor and name of a running server without force in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test update flavor and name of a running server without force
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+- name: Verify update flavor and name of a running server without force
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test update flavor and name of a running server without force idempotence
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: no
+ register: server
+- name: Verify update flavor and name of a running server without force idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test update flavor of a running server with force in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: yes
+ register: server
+ check_mode: yes
+- name: Verify update flavor of a running server with force in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test update flavor of a running server with force
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: yes
+ register: server
+- name: Verify update flavor of a running server with force
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test update flavor of a running server with force idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ flavor: '{{ cloudscale_test_flavor_2 }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ force: yes
+ register: server
+- name: Verify update flavor of a running server with force idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+ - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Remember uuid of running server for anti affinity
+ set_fact:
+ running_server_uuid: '{{ server.uuid }}'
+
+- name: Test create server stopped in anti affinity and private network only in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-stopped'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ use_public_network: no
+ use_private_network: yes
+ state: stopped
+ check_mode: yes
+ register: server_stopped
+- name: Verify create server stopped in anti affinity and private network only in check mode
+ assert:
+ that:
+ - server_stopped is changed
+ - server_stopped.state == 'absent'
+
+- name: Test create server stopped in anti affinity and private network only
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-stopped'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ use_public_network: no
+ use_private_network: yes
+ state: stopped
+ register: server_stopped
+- name: Verify create server stopped in anti affinity and private network only
+ assert:
+ that:
+ - server_stopped is changed
+ - server_stopped.state == 'stopped'
+ - server_stopped.zone.slug == '{{ cloudscale_test_zone }}'
+ - server_stopped.anti_affinity_with.0.uuid == running_server_uuid
+ - server_stopped.interfaces.0.type == 'private'
+ - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test create server stopped in anti affinity and private network only idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-stopped'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-1'
+ zone: '{{ cloudscale_test_zone }}'
+ use_public_network: no
+ use_private_network: yes
+ state: stopped
+ register: server_stopped
+- name: Verify create server stopped in anti affinity and private network only idempotence
+ assert:
+ that:
+ - server_stopped is not changed
+ - server_stopped.state == 'stopped'
+ - server_stopped.zone.slug == '{{ cloudscale_test_zone }}'
+ - server_stopped.anti_affinity_with.0.uuid == running_server_uuid
+ - server_stopped.interfaces.0.type == 'private'
+ - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test change server group not changed
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-stopped'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ server_groups: '{{ cloudscale_resource_prefix }}-group-2'
+ use_public_network: no
+ use_private_network: yes
+ state: stopped
+ register: server_stopped
+- name: Verify change server group not changed
+ assert:
+ that:
+ - server_stopped is not changed
+ - server_stopped.state == 'stopped'
+ - server_stopped.zone.slug == '{{ cloudscale_test_zone }}'
+ - server_stopped.anti_affinity_with.0.uuid == running_server_uuid
+ - server_stopped.interfaces.0.type == 'private'
+ - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1'
+
+- name: Test create server with password in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-password'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ password: '{{ cloudscale_test_password }}'
+ check_mode: yes
+ register: server_password
+- name: Verify create server with password in check mode
+ assert:
+ that:
+ - server_password is changed
+ - server_password.state == 'absent'
+ # Verify password is not logged
+ - server_password.diff.after.password != cloudscale_test_password
+
+- name: Test create server with password
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-password'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ password: '{{ cloudscale_test_password }}'
+ register: server_password
+- name: Verify create server with password
+ assert:
+ that:
+ - server_password is changed
+ - server_password.state == 'running'
+ # Verify password is not logged
+ - server_password.diff.after.password != cloudscale_test_password
+
+- name: Test create server with password idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-password'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ password: '{{ cloudscale_test_password }}'
+ register: server_password
+- name: Verify create server with password idempotence
+ assert:
+ that:
+ - server_password is not changed
+ - server_password.state == 'running'
+
+- name: Test create server failure without required parameters
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-failed'
+ register: server_failed
+ ignore_errors: yes
+- name: Verify create server failure without required parameters
+ assert:
+ that:
+ - server_failed is failed
+ - "'Failure while calling the cloudscale.ch API with POST' in server_failed.msg"
+ - "'This field is required.' in server_failed.fetch_url_info.body"
+
+- name: Test stop running server in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: stopped
+ check_mode: yes
+ register: server
+- name: Verify stop running server in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+
+- name: Test stop running server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: stopped
+ register: server
+- name: Verify stop running server
+ assert:
+ that:
+ - server is changed
+ - server.state == 'stopped'
+
+- name: Test stop running server idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-renamed'
+ state: 'stopped'
+ register: server
+- name: Verify stop running server idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'stopped'
+
+- name: Test update a stopped server in check mode
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ state: stopped
+ register: server
+ check_mode: yes
+- name: Verify update a stopped server in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'stopped'
+ - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test-renamed'
+
+- name: Test update a stopped server without force
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ state: stopped
+ register: server
+- name: Verify update a stopped server without force
+ assert:
+ that:
+ - server is changed
+ - server.state == 'stopped'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test update a stopped server idempotence
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-test'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ state: stopped
+ register: server
+- name: Verify update a stopped server idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'stopped'
+ - server.flavor.slug == '{{ cloudscale_test_flavor }}'
+ - server.name == '{{ cloudscale_resource_prefix }}-test'
+
+- name: Test server running in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: running
+ register: server
+ check_mode: yes
+- name: Verify server running in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'stopped'
+
+- name: Test server running
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: running
+ register: server
+- name: Verify server running
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+
+- name: Test server running idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: running
+ register: server
+- name: Verify server running idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'running'
+
+- name: Test running server deletion by name in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: absent
+ register: server
+ check_mode: yes
+- name: Verify running server deletion by name in check mode
+ assert:
+ that:
+ - server is changed
+ - server.state == 'running'
+
+- name: Test running server deletion by name
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: absent
+ register: server
+- name: Verify running server deletion by name
+ assert:
+ that:
+ - server is changed
+ - server.state == 'absent'
+
+- name: Test running server deletion by name idempotence
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test'
+ state: absent
+ register: server
+- name: Verify running server deletion by name idempotence
+ assert:
+ that:
+ - server is not changed
+ - server.state == 'absent'
+
+- name: Test stopped server deletion by uuid in check mode
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server_stopped.uuid }}'
+ state: absent
+ register: server_stopped
+ check_mode: yes
+- name: Verify stopped server deletion by uuid in check mode
+ assert:
+ that:
+ - server_stopped is changed
+ - server_stopped.state == 'stopped'
+
+- name: Test stopped server deletion by uuid
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server_stopped.uuid }}'
+ state: absent
+ register: server_stopped
+- name: Verify stopped server deletion by uuid
+ assert:
+ that:
+ - server_stopped is changed
+ - server_stopped.state == 'absent'
+
+- name: Test stopped server deletion by uuid idempotence
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server_stopped.uuid }}'
+ state: absent
+ register: server_stopped
+- name: Verify stopped server deletion by uuid idempotence
+ assert:
+ that:
+ - server_stopped is not changed
+ - server_stopped.state == 'absent'
+
+- name: Create first private network
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-test-interface-network'
+ state: present
+ zone: '{{ cloudscale_test_zone }}'
+ register: test_interface_network
+
+- name: Create second private network
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-test-interface-network1'
+ state: present
+ zone: '{{ cloudscale_test_zone }}'
+ register: test_interface_network1
+
+- name: Create server in specific private network in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ zone: '{{ cloudscale_test_zone }}'
+ state: running
+ interfaces:
+ - network: '{{ test_interface_network.uuid }}'
+ check_mode: yes
+ register: server
+- name: Verify Create server in specific private network in check mode
+ assert:
+ that:
+ - server.changed == True
+ - server.state == 'absent'
+
+- name: Create server in specific private network
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ zone: '{{ cloudscale_test_zone }}'
+ state: running
+ interfaces:
+ - network: '{{ test_interface_network.uuid }}'
+ register: server
+- name: Verify Create server in specific private network
+ assert:
+ that:
+ - server.changed == True
+ - server.state == 'running'
+ - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network'
+
+- name: Create server in specific private network idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ flavor: '{{ cloudscale_test_flavor }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ zone: '{{ cloudscale_test_zone }}'
+ state: running
+ interfaces:
+ - network: '{{ test_interface_network.uuid }}'
+ register: server
+- name: Verify Create server in specific private network idempotency
+ assert:
+ that:
+ - server.changed == False
+ - server.state == 'running'
+ - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network'
+
+- name: Remove private IP and add public network from server in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: '{{ test_interface_network.uuid }}'
+ addresses: []
+ check_mode: yes
+ register: server
+- name: Verify Remove private IP and add public network from server in check mode
+ assert:
+ that:
+ - server.changed == True
+ - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network'
+
+- name: Remove private IP and add public network from server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: '{{ test_interface_network.uuid }}'
+ addresses: []
+ register: server2
+- name: Verify Remove private IP and add public network from server
+ assert:
+ that:
+ - server2.changed
+ - server2.interfaces | selectattr("type", "eq", "public") | list | length == 1
+ - server2.interfaces | selectattr("type", "eq", "private") | list | length == 1
+ - (server2.interfaces | selectattr("type", "eq", "private") | first).addresses == []
+
+- name: Remove private IP and add public network from server idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: '{{ test_interface_network.uuid }}'
+ addresses: []
+ register: server
+- name: Verify Remove private IP and add public network from server idempotency
+ assert:
+ that:
+ - server.changed == False
+
+- name: Remove private network from server in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ check_mode: True
+ register: server
+- name: Verify Remove private network from server in check mode
+ assert:
+ that:
+ - server.changed == True
+ - server.interfaces | selectattr("type", "eq", "public") | list | length == 1
+ - server.interfaces | selectattr("type", "eq", "private") | list | length == 1
+
+- name: Remove private network from server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ register: server
+- name: Verify Remove private network from server
+ assert:
+ that:
+ - server.changed
+ - server.interfaces | selectattr("type", "eq", "public") | list | length == 1
+ - server.interfaces | selectattr("type", "eq", "private") | list | length == 0
+
+- name: Remove private network from server idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ register: server
+- name: Verify Remove private network from server idempotency
+ assert:
+ that:
+ - server.changed == False
+
+- name: Attach specific private network to server in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: "{{ test_interface_network1.uuid }}"
+ check_mode: True
+ register: server
+- name: Verify Attach specific private network to server in check mode
+ assert:
+ that:
+ - server.changed == True
+ - server.interfaces | selectattr("type", "eq", "public") | list | length == 1
+ - server.interfaces | selectattr("type", "eq", "private") | list | length == 0
+
+- name: Attach specific private network to server
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: "{{ test_interface_network1.uuid }}"
+ register: server
+- name: Verify Attach specific private network to server
+ assert:
+ that:
+ - server.changed
+ - server.interfaces.1.network.uuid == test_interface_network1.uuid
+
+- name: Attach specific private network to server idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - network: "{{ test_interface_network1.uuid }}"
+ register: server
+- name: Verify Attach specific private network to server idempotency
+ assert:
+ that:
+ - server.changed == False
+
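+# Derive two host addresses from the network's auto-created /24 subnet by
+# replacing the trailing "0/24" of its CIDR (e.g. x.y.z.0/24 -> x.y.z.31 / .32).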
+- name: Set fact private IP address
+ set_fact:
+ private_address0: "{{ test_interface_network1.subnets.0.cidr | regex_replace('0/24','31') }}"
+ private_address1: "{{ test_interface_network1.subnets.0.cidr | regex_replace('0/24','32') }}"
+
+- name: Set a static IP in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address0 }}"
+ check_mode: True
+ register: server_alt
+- name: Verify Set a static IP in check mode
+ assert:
+ that:
+ - server_alt.changed
+ - server.interfaces.1.addresses.0.address == server_alt.interfaces.1.addresses.0.address
+
+- name: Set a static IP
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address0 }}"
+ register: server
+- name: Verify Set a static IP
+ assert:
+ that:
+ - server.changed
+ - private_address0 == server.interfaces.1.addresses.0.address
+
+- name: Set a static IP idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address0 }}"
+ register: server
+- name: Verify Set a static IP idempotency
+ assert:
+ that:
+ - server.changed == False
+ - private_address0 == server.interfaces.1.addresses.0.address
+
+- name: Change a static IP in check mode
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address1 }}"
+ check_mode: True
+ register: server
+- name: Verify Change a static IP in check mode
+ assert:
+ that:
+ - server.changed
+ - private_address0 == server.interfaces.1.addresses.0.address
+
+- name: Change a static IP
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address1 }}"
+ register: server
+- name: Verify Change a static IP
+ assert:
+ that:
+ - server.changed
+ - private_address1 == server.interfaces.1.addresses.0.address
+
+- name: Change a static IP idempotency
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-test-interface'
+ interfaces:
+ - network: 'public'
+ - addresses:
+ - subnet: "{{ test_interface_network1.subnets.0.uuid }}"
+ address: "{{ private_address1 }}"
+ register: server
+- name: Verify Change a static IP idempotency
+ assert:
+ that:
+ - server.changed == False
+ - private_address1 == server.interfaces.1.addresses.0.address
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml
new file mode 100644
index 000000000..b02ee0315
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml
@@ -0,0 +1,45 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.server_group:
+ register: grp
+ ignore_errors: True
+- name: 'VERIFY: Fail name and UUID'
+ assert:
+ that:
+ - grp is failed
+
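+# The duplicate server groups are created through the raw API on purpose: the
+# module looks groups up by name and would not create a second one itself, and
+# the duplicates are needed to exercise the duplicate-name failure below.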
+- name: Create two server groups with the same name
+ uri:
+ url: 'https://api.cloudscale.ch/v1/server-groups'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ type: 'anti-affinity'
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try access to duplicate name
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ register: grp
+ ignore_errors: True
+- name: 'VERIFY: Try access to duplicate name'
+ assert:
+ that:
+ - grp is failed
+ - grp.msg.startswith("More than one server-groups resource with 'name' exists")
+
+- name: Fail server group creation with UUID
+ cloudscale_ch.cloud.server_group:
+ uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ register: grp
+ ignore_errors: True
+- name: 'VERIFY: Fail server group creation with UUID'
+ assert:
+ that:
+ - grp is failed
+ - grp.msg.startswith("state is present but all of the following are missing")
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml
new file mode 100644
index 000000000..44fe73032
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- block:
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_server_groups
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml
new file mode 100644
index 000000000..018b85a5f
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml
@@ -0,0 +1,180 @@
+---
+- name: Create server group in check mode
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: grp
+ check_mode: yes
+- name: 'VERIFY: Create server group in check mode'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+ - not grp.uuid
+
+- name: Create server group
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: grp
+- name: 'VERIFY: Create server group'
+ assert:
+ that:
+ - grp is changed
+ - grp.type == 'anti-affinity'
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+ - grp.zone.slug == '{{ cloudscale_test_zone }}'
+ - grp.uuid
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'production'
+ - grp.tags.sla == '24-7'
+
+- name: Remember uuid
+ set_fact:
+ server_group_uuid: '{{ grp.uuid }}'
+
+- name: Create server group idempotence
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: grp
+- name: 'VERIFY: Create server group idempotence'
+ assert:
+ that:
+ - grp is not changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+ - grp.zone.slug == '{{ cloudscale_test_zone }}'
+ - grp.uuid == server_group_uuid
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'production'
+ - grp.tags.sla == '24-7'
+
+- name: Create server group with same name in alt zone
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp'
+ zone: '{{ cloudscale_test_alt_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: grp
+- name: 'VERIFY: Create server group with same name in alt zone'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+ - grp.zone.slug == '{{ cloudscale_test_alt_zone }}'
+ - grp.uuid != server_group_uuid
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'production'
+ - grp.tags.sla == '24-7'
+
+- name: Update server group in check mode
+ cloudscale_ch.cloud.server_group:
+ uuid: '{{ server_group_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: grp
+ check_mode: yes
+- name: 'VERIFY: Update server group in check mode'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+ - grp.uuid == server_group_uuid
+ - grp.zone.slug == '{{ cloudscale_test_zone }}'
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'production'
+ - grp.tags.sla == '24-7'
+
+- name: Update server group
+ cloudscale_ch.cloud.server_group:
+ uuid: '{{ server_group_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: grp
+- name: 'VERIFY: Update server group'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+ - grp.uuid == server_group_uuid
+ - grp.zone.slug == '{{ cloudscale_test_zone }}'
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'staging'
+ - grp.tags.sla == '8-5'
+
+- name: Update server group idempotence
+ cloudscale_ch.cloud.server_group:
+ uuid: '{{ server_group_uuid }}'
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: grp
+- name: 'VERIFY: Update server group idempotence'
+ assert:
+ that:
+ - grp is not changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+ - grp.uuid == server_group_uuid
+ - grp.zone.slug == '{{ cloudscale_test_zone }}'
+ - grp.tags.project == 'ansible-test'
+ - grp.tags.stage == 'staging'
+ - grp.tags.sla == '8-5'
+
+- name: Delete server group in check mode
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ state: absent
+ register: grp
+ check_mode: yes
+- name: 'VERIFY: Delete server group in check mode'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+ - grp.uuid == server_group_uuid
+
+- name: Delete server group
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ state: absent
+ register: grp
+- name: 'VERIFY: Delete server group'
+ assert:
+ that:
+ - grp is changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+ - grp.uuid == server_group_uuid
+
+- name: Delete server group idempotence
+ cloudscale_ch.cloud.server_group:
+ name: '{{ cloudscale_resource_prefix }}-grp2'
+ state: absent
+ register: grp
+- name: 'VERIFY: Delete server group idempotence'
+ assert:
+ that:
+ - grp is not changed
+ - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+ - not grp.uuid
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml
new file mode 100644
index 000000000..67d807cd7
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+cloudscale_subnet_cidr: 192.168.23.0/24
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml
new file mode 100644
index 000000000..c28f5ab0b
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml
@@ -0,0 +1,82 @@
+---
+- name: Fail missing params
+ cloudscale_ch.cloud.subnet:
+ register: snet
+ ignore_errors: True
+- name: 'VERIFY: Fail missing params'
+ assert:
+ that:
+ - snet is failed
+ - snet.msg.startswith("one of the following is required")
+
+- name: Fail creation with UUID
+ cloudscale_ch.cloud.subnet:
+ uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ register: snet
+ ignore_errors: True
+- name: 'VERIFY: Fail creation with UUID'
+ assert:
+ that:
+ - snet is failed
+ - snet.msg.startswith("state is present but all of the following are missing")
+
+- name: Fail creation unknown network name
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-does-not-exist'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: snet
+ ignore_errors: True
+- name: 'VERIFY: Fail creation unknown network name'
+ assert:
+ that:
+ - snet is failed
+ - snet.msg.startswith("Network with 'name' not found")
+
+- name: Fail creation unknown network uuid
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ uuid: 'f0bb5270-f66c-41d6-ac3b-a223cd280ced'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: snet
+ ignore_errors: True
+- name: 'VERIFY: Fail creation unknown network uuid'
+ assert:
+ that:
+ - snet is failed
+ - snet.msg.startswith("Network with 'uuid' not found")
+
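+# Two networks sharing one name are created through the raw API so that the
+# subnet module's duplicate-name error handling can be exercised below.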
+- name: Create two networks with the same name
+ uri:
+ url: 'https://api.cloudscale.ch/v1/networks'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ auto_create_ipv4_subnet: False
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try access to duplicate network name
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ register: snet
+ ignore_errors: True
+- name: 'VERIFY: Try access to duplicate network name'
+ assert:
+ that:
+ - snet is failed
+ - snet.msg.startswith("Multiple networks with 'name' not found")
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml
new file mode 100644
index 000000000..54078ffa8
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Subnet test run
+ block:
+ - import_tasks: failures.yml
+ - import_tasks: setup.yml
+ - import_tasks: tests.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_networks
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml
new file mode 100644
index 000000000..4662f9e72
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml
@@ -0,0 +1,13 @@
+---
+- name: Setup network in alt zone
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ auto_create_ipv4_subnet: false
+ zone: '{{ cloudscale_test_alt_zone }}'
+
+- name: Setup network in test zone
+ cloudscale_ch.cloud.network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ auto_create_ipv4_subnet: false
+ zone: '{{ cloudscale_test_zone }}'
+ register: net
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml
new file mode 100644
index 000000000..5e0ff6b1a
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml
@@ -0,0 +1,243 @@
+---
+- name: Create subnet in check mode
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: snet
+ check_mode: yes
+- name: 'VERIFY: Create subnet in check mode'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - not snet.uuid
+
+- name: Create subnet
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: snet
+- name: 'VERIFY: Create subnet'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'production'
+ - snet.tags.sla == '24-7'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Remember subnet uuid
+ set_fact:
+ subnet_uuid: '{{ snet.uuid }}'
+
+- name: Create subnet idempotence
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: snet
+- name: 'VERIFY: Create subnet idempotence'
+ assert:
+ that:
+ - snet is not changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'production'
+ - snet.tags.sla == '24-7'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Update subnet in check mode
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: snet
+ check_mode: yes
+- name: 'VERIFY: Update subnet in check mode'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'production'
+ - snet.tags.sla == '24-7'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Update subnet
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: snet
+- name: 'VERIFY: Update subnet'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'staging'
+ - snet.tags.sla == '8-5'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Update subnet idempotence
+ cloudscale_ch.cloud.subnet:
+ uuid: '{{ subnet_uuid }}'
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ name: '{{ cloudscale_resource_prefix }}-net'
+ zone: '{{ cloudscale_test_zone }}'
+ tags:
+ project: ansible-test
+ stage: staging
+ sla: 8-5
+ register: snet
+- name: 'VERIFY: Update subnet idempotence'
+ assert:
+ that:
+ - snet is not changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'staging'
+ - snet.tags.sla == '8-5'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Update subnet by Network UUID
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ uuid: '{{ net.uuid }}'
+ dns_servers:
+ - 9.9.9.9
+ - 8.8.8.8
+ gateway_address: 192.168.23.1
+ register: snet
+- name: 'VERIFY: Update subnet by Network UUID'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.dns_servers == ['9.9.9.9', '8.8.8.8']
+ - snet.gateway_address == '192.168.23.1'
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'staging'
+ - snet.tags.sla == '8-5'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Update subnet by Network UUID idempotence
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ uuid: '{{ net.uuid }}'
+ dns_servers:
+ - 9.9.9.9
+ - 8.8.8.8
+ gateway_address: 192.168.23.1
+ register: snet
+- name: 'VERIFY: Update subnet by Network UUID idempotence'
+ assert:
+ that:
+ - snet is not changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.dns_servers == ['9.9.9.9', '8.8.8.8']
+ - snet.gateway_address == '192.168.23.1'
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'staging'
+ - snet.tags.sla == '8-5'
+ - snet.network.zone.slug == cloudscale_test_zone
+
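+# "reset: true" is expected to revert options not given explicitly (here the
+# DNS servers) to their defaults; the assert below checks exactly that.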
+- name: Reset DNS servers in subnet
+ cloudscale_ch.cloud.subnet:
+ cidr: '{{ cloudscale_subnet_cidr }}'
+ network:
+ uuid: '{{ net.uuid }}'
+ gateway_address: 192.168.23.1
+ reset: true
+ register: snet
+- name: 'VERIFY: Reset DNS servers in subnet'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.dns_servers != ['9.9.9.9', '8.8.8.8']
+ - snet.gateway_address == '192.168.23.1'
+ - snet.tags.project == 'ansible-test'
+ - snet.tags.stage == 'staging'
+ - snet.tags.sla == '8-5'
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Delete subnet in check mode
+ cloudscale_ch.cloud.subnet:
+ uuid: "{{ snet.uuid }}"
+ state: absent
+ register: snet
+ check_mode: yes
+- name: 'VERIFY: Delete subnet in check mode'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.state == "present"
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Delete subnet
+ cloudscale_ch.cloud.subnet:
+ uuid: "{{ snet.uuid }}"
+ state: absent
+ register: snet
+- name: 'VERIFY: Delete subnet'
+ assert:
+ that:
+ - snet is changed
+ - snet.cidr == cloudscale_subnet_cidr
+ - snet.uuid == subnet_uuid
+ - snet.state == "absent"
+ - snet.network.zone.slug == cloudscale_test_zone
+
+- name: Delete subnet idempotence
+ cloudscale_ch.cloud.subnet:
+ uuid: "{{ snet.uuid }}"
+ state: absent
+ register: snet
+- name: 'VERIFY: Delete subnet idempotence'
+ assert:
+ that:
+ - snet is not changed
+ - snet.uuid == subnet_uuid
+ - snet.state == "absent"
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases
new file mode 100644
index 000000000..c200a3d2c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml
new file mode 100644
index 000000000..2083f0e12
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - common
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml
new file mode 100644
index 000000000..e7abce571
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove test server
+ cloudscale_ch.cloud.server:
+ uuid: '{{ server.uuid }}'
+ state: 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml
new file mode 100644
index 000000000..8dfa534aa
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml
@@ -0,0 +1,31 @@
+---
+# TODO To be removed in version 3.0.0
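+# Both tasks run in check mode and only assert that the legacy server_uuid /
+# server_uuids options emit a deprecation warning; no volume is created.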
+- name: Test server_uuid deprecation warning
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 50
+ server_uuid:
+ - 'd231db77-fdb3-4301-ae7c-f68ca2574496'
+ check_mode: True
+ register: vol
+- name: Verify server_uuid deprecation warning
+ assert:
+ that:
+ - vol is changed
+ - '"deprecations" in vol'
+
+- name: Test server_uuids deprecation warning
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 50
+ server_uuids:
+ - 'd231db77-fdb3-4301-ae7c-f68ca2574496'
+ check_mode: True
+ register: vol
+- name: Verify server_uuids deprecation warning
+ assert:
+ that:
+ - vol is changed
+ - '"deprecations" in vol'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml
new file mode 100644
index 000000000..4962a7a7c
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml
@@ -0,0 +1,38 @@
+---
+- name: Create two volumes with the same name
+ uri:
+ url: 'https://api.cloudscale.ch/v1/volumes'
+ method: POST
+ headers:
+ Authorization: 'Bearer {{ cloudscale_api_token }}'
+ body:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ size_gb: 50
+ body_format: json
+ status_code: 201
+ register: duplicate
+ with_sequence: count=2
+
+- name: Try access to duplicate name
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-duplicate'
+ size_gb: 10
+ register: vol
+ ignore_errors: True
+- name: 'VERIFY: Try access to duplicate name'
+ assert:
+ that:
+ - vol is failed
+
+- name: Fail volume creation with UUID
+ cloudscale_ch.cloud.volume:
+ uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+ name: '{{ cloudscale_resource_prefix }}-inexistent'
+ size_gb: 10
+ register: vol
+ ignore_errors: True
+- name: 'VERIFY: Fail volume creation with UUID'
+ assert:
+ that:
+ - vol is failed
+ - vol.msg.startswith("The resource with UUID 'ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48' was not found")
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml
new file mode 100644
index 000000000..21ea73867
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- block:
+ - import_tasks: setup.yml
+ - import_tasks: failures.yml
+ - import_tasks: tests.yml
+ - import_tasks: deprecation_warning.yml
+ always:
+ - import_role:
+ name: common
+ tasks_from: cleanup_servers
+ - import_role:
+ name: common
+ tasks_from: cleanup_volumes
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml
new file mode 100644
index 000000000..9860e0b52
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml
@@ -0,0 +1,10 @@
+---
+- name: Create test instance
+ cloudscale_ch.cloud.server:
+ name: '{{ cloudscale_resource_prefix }}-server'
+ flavor: '{{ cloudscale_test_flavor }}'
+ zone: '{{ cloudscale_test_zone }}'
+ image: '{{ cloudscale_test_image }}'
+ ssh_keys:
+ - '{{ cloudscale_test_ssh_key }}'
+ register: server
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml
new file mode 100644
index 000000000..d72e731a6
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml
@@ -0,0 +1,260 @@
+---
+- name: Create volume in check mode
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 50
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ check_mode: yes
+ register: vol
+- name: 'VERIFY: Create volume in check mode'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - vol.state == 'absent'
+
+- name: Create volume
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 50
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: vol
+- name: 'VERIFY: Create volume'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - vol.size_gb == 50
+ - vol.name == '{{ cloudscale_resource_prefix }}-vol'
+ - vol.zone.slug == '{{ cloudscale_test_zone }}'
+ - vol.tags.project == 'ansible-test'
+ - vol.tags.stage == 'production'
+ - vol.tags.sla == '24-7'
+
+- name: Create volume idempotence
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 50
+ tags:
+ project: ansible-test
+ stage: production
+ sla: 24-7
+ register: vol
+- name: 'VERIFY: Create volume idempotence'
+ assert:
+ that:
+ - vol is successful
+ - vol is not changed
+ - vol.size_gb == 50
+ - vol.name == '{{ cloudscale_resource_prefix }}-vol'
+ - vol.zone.slug == '{{ cloudscale_test_zone }}'
+ - vol.tags.project == 'ansible-test'
+ - vol.tags.stage == 'production'
+ - vol.tags.sla == '24-7'
+
+- name: Attach existing volume by name to server in check mode
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ servers:
+ - '{{ server.uuid }}'
+ check_mode: yes
+ register: vol
+- name: 'VERIFY: Attach existing volume by name to server in check mode'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - server.uuid not in vol.servers
+
+- name: Attach existing volume by name to server
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ servers:
+ - '{{ server.uuid }}'
+ register: vol
+- name: 'VERIFY: Attach existing volume by name to server'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - server.uuid in vol.servers | map(attribute="uuid")
+
+- name: Attach existing volume by name to server idempotence
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-vol'
+ servers:
+ - '{{ server.uuid }}'
+ register: vol
+- name: 'VERIFY: Attach existing volume by name to server idempotence'
+ assert:
+ that:
+ - vol is successful
+ - vol is not changed
+ - server.uuid in vol.servers | map(attribute="uuid")
+
+- name: Resize attached volume by UUID in check mode
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ size_gb: 100
+ check_mode: yes
+ register: vol
+- name: 'VERIFY: Resize attached volume by UUID in check mode'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - vol.size_gb == 50
+
+- name: Resize attached volume by UUID
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ size_gb: 100
+ register: vol
+- name: 'VERIFY: Resize attached volume by UUID'
+ assert:
+ that:
+ - vol is successful
+ - vol is changed
+ - vol.size_gb == 100
+
+- name: Resize attached volume by UUID idempotence
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ size_gb: 100
+ register: vol
+- name: 'VERIFY: Resize attached volume by UUID idempotence'
+ assert:
+ that:
+ - vol is successful
+ - vol is not changed
+ - vol.size_gb == 100
+
+- name: Delete attached volume by UUID in check mode
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ state: 'absent'
+ check_mode: yes
+ register: deleted
+- name: 'VERIFY: Delete attached volume by UUID in check mode'
+ assert:
+ that:
+ - deleted is successful
+ - deleted is changed
+ - deleted.state == 'present'
+ - deleted.uuid == vol.uuid
+ - deleted.name == '{{ cloudscale_resource_prefix }}-vol'
+
+- name: Delete attached volume by UUID
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ state: 'absent'
+ register: deleted
+- name: 'VERIFY: Delete attached volume by UUID'
+ assert:
+ that:
+ - deleted is successful
+ - deleted is changed
+ - deleted.state == 'absent'
+ - deleted.uuid == vol.uuid
+ - deleted.name == '{{ cloudscale_resource_prefix }}-vol'
+
+- name: Delete attached volume by UUID idempotence
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ vol.uuid }}'
+ state: 'absent'
+ register: deleted
+- name: 'VERIFY: Delete attached volume by UUID idempotence'
+ assert:
+ that:
+ - deleted is successful
+ - deleted is not changed
+ - deleted.state == 'absent'
+ - deleted.uuid == vol.uuid
+ - not deleted.name
+
+- name: Create bulk volume and attach
+ cloudscale_ch.cloud.volume:
+ name: '{{ cloudscale_resource_prefix }}-bulk'
+ type: bulk
+ zone: '{{ cloudscale_test_zone }}'
+ size_gb: 100
+ servers:
+ - '{{ server.uuid }}'
+ register: bulk
+- name: 'VERIFY: Create bulk volume and attach'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is changed
+ - bulk.size_gb == 100
+ - server.uuid in bulk.servers | map(attribute="uuid")
+
+- name: Detach volume by UUID
+ cloudscale_ch.cloud.volume:
+ uuid: '{{ bulk.uuid }}'
+ servers: []
+ register: bulk
+- name: 'VERIFY: Detach volume by UUID'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is changed
+ - bulk.servers == []
+
+- name: Resize detached volume by name
+ cloudscale_ch.cloud.volume:
+ name: '{{ bulk.name }}'
+ size_gb: 200
+ register: bulk
+- name: 'VERIFY: Resize detached volume by name'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is changed
+ - bulk.size_gb == 200
+
+- name: Delete volume by name in check mode
+ cloudscale_ch.cloud.volume:
+ name: '{{ bulk.name }}'
+ state: 'absent'
+ check_mode: yes
+ register: bulk
+- name: 'VERIFY: Delete volume by name in check mode'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is changed
+ - bulk.state == 'present'
+
+- name: Delete volume by name
+ cloudscale_ch.cloud.volume:
+ name: '{{ bulk.name }}'
+ state: 'absent'
+ register: bulk
+- name: 'VERIFY: Delete volume by name'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is changed
+ - bulk.state == 'absent'
+
+- name: Delete volume by name idempotence
+ cloudscale_ch.cloud.volume:
+ name: '{{ bulk.name }}'
+ state: 'absent'
+ register: bulk
+- name: 'VERIFY: Delete volume by name idempotence'
+ assert:
+ that:
+ - bulk is successful
+ - bulk is not changed
+ - bulk.state == 'absent'
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt b/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt
new file mode 100644
index 000000000..4f6a277e3
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt
@@ -0,0 +1 @@
+netaddr
diff --git a/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt b/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt