Diffstat (limited to 'ansible_collections/community/digitalocean')
202 files changed, 23520 insertions, 0 deletions
diff --git a/ansible_collections/community/digitalocean/.github/workflows/ansible-test-integration.yml b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-integration.yml new file mode 100644 index 00000000..d2962f21 --- /dev/null +++ b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-integration.yml @@ -0,0 +1,80 @@ +name: integration +on: + push: + branches: + - main + schedule: + - cron: "10 6 * * *" + workflow_dispatch: + +concurrency: + group: cloud-integration-tests + cancel-in-progress: false + +jobs: + integration: + runs-on: ubuntu-latest + timeout-minutes: 40 + strategy: + fail-fast: false + max-parallel: 1 + matrix: + module: + - digital_ocean_account_info + - digital_ocean_balance_info + - digital_ocean_block_storage + - digital_ocean_cdn_endpoints + - digital_ocean_cdn_endpoints_info + - digital_ocean_certificate + - digital_ocean_certificate_info + - digital_ocean_database + - digital_ocean_database_info + - digital_ocean_domain + - digital_ocean_domain_info + - digital_ocean_domain_record + - digital_ocean_domain_record_info + - digital_ocean_droplet + - digital_ocean_droplet_info + - digital_ocean_firewall + - digital_ocean_firewall_info + - digital_ocean_floating_ip + - digital_ocean_floating_ip_info + - digital_ocean_image_info + - digital_ocean_kubernetes + - digital_ocean_kubernetes_info + - digital_ocean_load_balancer + - digital_ocean_load_balancer_info + - digital_ocean_monitoring_alerts + - digital_ocean_monitoring_alerts_info + - digital_ocean_project + - digital_ocean_project_info + - digital_ocean_region_info + - digital_ocean_size_info + - digital_ocean_snapshot + - digital_ocean_snapshot_info + - digital_ocean_spaces + - digital_ocean_spaces_info + - digital_ocean_sshkey + - digital_ocean_sshkey_info + - digital_ocean_tag + - digital_ocean_tag_info + - digital_ocean_volume_info + - digital_ocean_vpc + - digital_ocean_vpc_info + + steps: + - name: Perform testing + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + pre-test-cmd: >- + DO_API_KEY=${{ secrets.DO_API_KEY }} + AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} + ./tests/utils/render.sh + tests/integration/integration_config.yml.template + > tests/integration/integration_config.yml + origin-python-version: 3.9 + target: ${{ matrix.module }} + target-python-version: 3.9 + testing-type: integration + test-deps: community.general diff --git a/ansible_collections/community/digitalocean/.github/workflows/ansible-test-sanity.yml b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-sanity.yml new file mode 100644 index 00000000..95cde900 --- /dev/null +++ b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-sanity.yml @@ -0,0 +1,52 @@ +name: sanity +on: + pull_request: + types: [ opened, synchronize, reopened ] + push: + branches: [ main ] + schedule: + - cron: '0 6 * * *' + +jobs: + + sanity_29: + timeout-minutes: 30 + name: Sanity (Ⓐ$${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.9 + runs-on: ubuntu-latest + steps: + - name: Perform testing + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + ansible-core-version: ${{ matrix.ansible }} + # pre-test-cmd: + python-version: 3.8 + target-python-version: 3.8 + testing-type: sanity + # test-deps: + + sanity: + timeout-minutes: 30 + name: Sanity (Ⓐ$${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.10 + - stable-2.11 + - stable-2.12 
+ - stable-2.13 + - devel + runs-on: ubuntu-latest + steps: + - name: Perform testing + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + ansible-core-version: ${{ matrix.ansible }} + # pre-test-cmd: + python-version: 3.9 + target-python-version: 3.9 + testing-type: sanity + # test-deps: diff --git a/ansible_collections/community/digitalocean/.github/workflows/ansible-test-unit.yml b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-unit.yml new file mode 100644 index 00000000..4b3615c1 --- /dev/null +++ b/ansible_collections/community/digitalocean/.github/workflows/ansible-test-unit.yml @@ -0,0 +1,60 @@ +name: unit +on: + pull_request: + types: [ opened, synchronize, reopened ] + push: + branches: [ main ] + schedule: + - cron: '10 6 * * *' + +jobs: + + units_29: + runs-on: ubuntu-latest + timeout-minutes: 30 + name: Units (Ⓐ${{ matrix.ansible }}) + strategy: + fail-fast: true + matrix: + ansible: + - stable-2.9 + steps: + - name: Perform testing + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + ansible-core-version: ${{ matrix.ansible }} + # pre-test-cmd: + python-version: 3.8 + target-python-version: 3.8 + testing-type: units + test-deps: >- + ansible.netcommon + ansible.utils + community.general + + units: + runs-on: ubuntu-latest + timeout-minutes: 30 + name: Units (Ⓐ${{ matrix.ansible }}) + strategy: + fail-fast: true + matrix: + ansible: + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - devel + steps: + - name: Perform testing + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + ansible-core-version: ${{ matrix.ansible }} + # pre-test-cmd: + python-version: 3.9 + target-python-version: 3.9 + testing-type: units + test-deps: >- + ansible.netcommon + ansible.utils + community.general diff --git a/ansible_collections/community/digitalocean/.github/workflows/black.yml b/ansible_collections/community/digitalocean/.github/workflows/black.yml new file mode 100644 index 00000000..ce6685b0 --- /dev/null +++ b/ansible_collections/community/digitalocean/.github/workflows/black.yml @@ -0,0 +1,16 @@ +# https://black.readthedocs.io/en/stable/integrations/github_actions.html +name: black + +on: + pull_request: + types: [ opened, synchronize, reopened ] + push: + branches: [ main ] + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v2 + - uses: psf/black@stable diff --git a/ansible_collections/community/digitalocean/.github/workflows/pull-request-integration.yml b/ansible_collections/community/digitalocean/.github/workflows/pull-request-integration.yml new file mode 100644 index 00000000..b70f30e3 --- /dev/null +++ b/ansible_collections/community/digitalocean/.github/workflows/pull-request-integration.yml @@ -0,0 +1,152 @@ +name: pull-request-integration + +on: + pull_request_target: + branches: [main] + types: [opened, synchronize, reopened] + paths: + - plugins/module_utils/** + - plugins/modules/** + +concurrency: + group: cloud-integration-tests + cancel-in-progress: false + +env: + DEFAULT_BRANCH: remotes/origin/main + +jobs: + changes: + # Require reviewers for this environment + # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + environment: integration + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.head_ref }} + + - name: show changes files + run: | + git diff --name-only $DEFAULT_BRANCH + + - name: get changed module_utils + id: changed-module-utils + 
run: | + basenames=() + for file in $(git diff --name-only $DEFAULT_BRANCH | grep 'plugins/module_utils/'); do + basenames+=($(basename $file .py)) + done + printf '::set-output name=matrix::%s\n' $(printf '%s\n' "${basenames[@]}" | jq -R . | jq -sc .) + + - name: get changed modules + id: changed-modules + run: | + basenames=() + for file in $(git diff --name-only $DEFAULT_BRANCH | grep 'plugins/modules/'); do + basenames+=($(basename $file .py)) + done + printf '::set-output name=matrix::%s\n' $(printf '%s\n' "${basenames[@]}" | jq -R . | jq -sc .) + + outputs: + module-utils-matrix: ${{ steps.changed-module-utils.outputs.matrix }} + module-matrix: ${{ steps.changed-modules.outputs.matrix }} + + test-module-utils: + environment: integration + runs-on: ubuntu-latest + timeout-minutes: 120 + needs: [changes] + if: ${{ needs.changes.outputs.module-utils-matrix != '[""]' }} + strategy: + fail-fast: false + max-parallel: 1 + matrix: + module: + - digital_ocean_account_info + - digital_ocean_balance_info + - digital_ocean_block_storage + - digital_ocean_cdn_endpoints + - digital_ocean_cdn_endpoints_info + - digital_ocean_certificate + - digital_ocean_certificate_info + - digital_ocean_database + - digital_ocean_database_info + - digital_ocean_domain + - digital_ocean_domain_info + - digital_ocean_domain_record + - digital_ocean_domain_record_info + - digital_ocean_droplet + - digital_ocean_droplet_info + - digital_ocean_firewall + - digital_ocean_firewall_info + - digital_ocean_floating_ip + - digital_ocean_floating_ip_info + - digital_ocean_image_info + - digital_ocean_kubernetes + - digital_ocean_kubernetes_info + - digital_ocean_load_balancer + - digital_ocean_load_balancer_info + - digital_ocean_monitoring_alerts + - digital_ocean_monitoring_alerts_info + - digital_ocean_project + - digital_ocean_project_info + - digital_ocean_region_info + - digital_ocean_size_info + - digital_ocean_snapshot + - digital_ocean_snapshot_info + - digital_ocean_spaces + - digital_ocean_spaces_info + - digital_ocean_sshkey + - digital_ocean_sshkey_info + - digital_ocean_tag + - digital_ocean_tag_info + - digital_ocean_volume_info + - digital_ocean_vpc + - digital_ocean_vpc_info + steps: + - name: Perform testing (all modules) + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + git-checkout-ref: ${{ github.event.pull_request.head.sha }} + pre-test-cmd: >- + DO_API_KEY=${{ secrets.DO_API_KEY }} + AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} + ./tests/utils/render.sh + tests/integration/integration_config.yml.template + > tests/integration/integration_config.yml + origin-python-version: 3.9 + target-python-version: 3.9 + testing-type: integration + test-deps: community.general + target: ${{ matrix.module }} + + test-modules: + environment: integration + runs-on: ubuntu-latest + timeout-minutes: 40 + needs: [changes] + if: ${{ needs.changes.outputs.module-utils-matrix == '[""]' && needs.changes.outputs.module-matrix != '[""]' }} + strategy: + fail-fast: false + matrix: + module: ${{ fromJSON(needs.changes.outputs.module-matrix) }} + steps: + - name: Perform testing (changed module) + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + git-checkout-ref: ${{ github.event.pull_request.head.sha }} + pre-test-cmd: >- + DO_API_KEY=${{ secrets.DO_API_KEY }} + AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} + ./tests/utils/render.sh + 
tests/integration/integration_config.yml.template + > tests/integration/integration_config.yml + origin-python-version: 3.9 + target-python-version: 3.9 + testing-type: integration + test-deps: community.general + target: ${{ matrix.module }} diff --git a/ansible_collections/community/digitalocean/.gitignore b/ansible_collections/community/digitalocean/.gitignore new file mode 100644 index 00000000..76f82320 --- /dev/null +++ b/ansible_collections/community/digitalocean/.gitignore @@ -0,0 +1,132 @@ +/tests/output/ +/tests/integration/integration_config.yml + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/ansible_collections/community/digitalocean/CHANGELOG.rst b/ansible_collections/community/digitalocean/CHANGELOG.rst new file mode 100644 index 00000000..3f7009fc --- /dev/null +++ b/ansible_collections/community/digitalocean/CHANGELOG.rst @@ -0,0 +1,461 @@ +==================================== +Community DigitalOcean Release Notes +==================================== + +.. contents:: Topics + + +v1.23.0 +======= + +Minor Changes +------------- + +- digital_ocean_load_balancer - add support for C(size_unit) over deprecated C(size); deprecate C(algorithm) completely (https://github.com/ansible-collections/community.digitalocean/issues/270). +- documentation - refresh the "Testing and Development" section of the C(README.md) (https://github.com/ansible-collections/community.digitalocean/issues/268). +- integration tests - add a dedicated integration test for C(digital_ocean_database_info) (https://github.com/ansible-collections/community.digitalocean/issues/289). 
+- integration tests - set pull request integration tests to run against branch instead of last commit (https://github.com/ansible-collections/community.digitalocean/issues/291). + +Bugfixes +-------- + +- inventory plugin - bugfix for baseurl parameter (https://github.com/ansible-collections/community.digitalocean/pull/297). +- integration tests - add missing `environment` directive on pull request integration testing (https://github.com/ansible-collections/community.digitalocean/issues/293). + +v1.22.0 +======= + +Minor Changes +------------- + +- collection - added an action group C(community.digitalocean.all) for use with module defaults (https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html) (https://github.com/ansible-collections/community.digitalocean/issues/281). +- digital_ocean_vpc - add C(vpc) key to returned VPC data on create (https://github.com/ansible-collections/community.digitalocean/issues/276). +- integration tests - perform integration testing on all modules for changes in C(plugins/module_utils) or by changed module in C(plugins/modules) (https://github.com/ansible-collections/community.digitalocean/issues/286). +- integration tests - split the integration tests by module and run them serially (https://github.com/ansible-collections/community.digitalocean/issues/280). + +v1.21.0 +======= + +Minor Changes +------------- + +- digital_ocean - add sanity test ignores for Ansible 2.12 and 2.13 (https://github.com/ansible-collections/community.digitalocean/issues/247). + +Bugfixes +-------- + +- digital_ocean_droplet - if the JSON response lacks a key and the associated variable is set to ``None``, then don't treat that variable like a ``dict`` and call ``get()`` on it without first testing it (https://github.com/ansible-collections/community.digitalocean/issues/272). + +v1.20.0 +======= + +Minor Changes +------------- + +- digital_ocean_cdn_endpoints - update Spaces endpoint and add a few delays to the integration test (https://github.com/ansible-collections/community.digitalocean/issues/267). +- digital_ocean_load_balancer - Allow creating a load balancer and associating droplets by tag as an alternative to ``droplet_ids``. + +Bugfixes +-------- + +- digital_ocean_droplet - fix regression in droplet deletion where ``name`` and ``unique_name`` (set to true) are required and ``id`` alone is insufficient (though ``id`` is sufficient to uniquely identify a droplet for deletion). (https://github.com/ansible-collections/community.digitalocean/issues/260) +- digital_ocean_droplet - fix regression where droplet info (for example networking) doesn't update when waiting during creation unless ``unique_name`` is set to true (https://github.com/ansible-collections/community.digitalocean/issues/220). + +v1.19.0 +======= + +Minor Changes +------------- + +- digital_ocean - reference C(DO_API_TOKEN) consistently in module documentation and examples (https://github.com/ansible-collections/community.digitalocean/issues/248). + +Bugfixes +-------- + +- digital_ocean_cdn_endpoints - remove non-API parameters before posting to the API (https://github.com/ansible-collections/community.digitalocean/issues/252). +- digital_ocean_cdn_endpoints - use the correct module name in the C(EXAMPLES) (https://github.com/ansible-collections/community.digitalocean/issues/251). + +v1.18.0 +======= + +Minor Changes +------------- + +- ci - adding stable-2.13 to sanity and unit testing (https://github.com/ansible-collections/community.digitalocean/issues/239). 
+- digital_ocean_spaces - set C(no_log=True) for C(aws_access_key_id) parameter (https://github.com/ansible-collections/community.digitalocean/issues/243). +- digital_ocean_spaces_info - set C(no_log=True) for C(aws_access_key_id) parameter (https://github.com/ansible-collections/community.digitalocean/issues/243). + +v1.17.0 +======= + +Minor Changes +------------- + +- digital_ocean - parameterize the DigitalOcean API base url (https://github.com/ansible-collections/community.digitalocean/issues/237). + +v1.16.0 +======= + +Minor Changes +------------- + +- black test - added a 15 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). +- digital_ocean_domain - add support for IPv6 apex domain records (https://github.com/ansible-collections/community.digitalocean/issues/226). +- integration tests - added a 120 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). +- sanity and unit tests - added a 30 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). + +Bugfixes +-------- + +- digital_ocean_kubernetes - add missing elements type to C(node_pools.tags) and C(node_pools.taints) options (https://github.com/ansible-collections/community.digitalocean/issues/232). + +New Modules +----------- + +- digital_ocean_domain_record_info - Gather information about DigitalOcean domain records + +v1.15.1 +======= + +Minor Changes +------------- + +- Updates DigitalOcean API documentation links to current domain with working URL anchors (https://github.com/ansible-collections/community.digitalocean/issues/223). + +Bugfixes +-------- + +- digital_ocean_droplet - fix reporting of changed state when ``firewall`` argument is present (https://github.com/ansible-collections/community.digitalocean/pull/219). + +v1.15.0 +======= + +Bugfixes +-------- + +- digital_ocean_droplet - move Droplet data under "droplet" key in returned payload (https://github.com/ansible-collections/community.digitalocean/issues/211). + +New Modules +----------- + +- digital_ocean_spaces - Create and remove DigitalOcean Spaces. +- digital_ocean_spaces_info - List DigitalOcean Spaces. + +v1.14.0 +======= + +Minor Changes +------------- + +- digital_ocean_kubernetes_info - switching C(changed=True) to C(changed=False) since getting information is read-only in nature (https://github.com/ansible-collections/community.digitalocean/issues/204). + +Bugfixes +-------- + +- Update README.md with updated Droplet examples (https://github.com/ansible-collections/community.digitalocean/issues/199). +- digital_ocean_cdn_endpoints - defaulting optional string parameters as strings (https://github.com/ansible-collections/community.digitalocean/issues/205). +- digital_ocean_cdn_endpoints - updating Spaces endpoint for the integration test (https://github.com/ansible-collections/community.digitalocean/issues/205). +- digital_ocean_droplet - ensure that Droplet creation is successful (https://github.com/ansible-collections/community.digitalocean/issues/197). +- digital_ocean_droplet - fixing project assignment for the C(unique_name=False) case (https://github.com/ansible-collections/community.digitalocean/issues/201). +- digital_ocean_droplet - update Droplet examples (https://github.com/ansible-collections/community.digitalocean/issues/199). + +v1.13.0 +======= + +Minor Changes +------------- + +- Set Python 3.9 as the C(python-version) and C(target-python-version) in the integration, sanity, and unit tests for Ansible > 2.9 (3.8 otherwise). 
+- digital_ocean_droplet - allow the user to override the Droplet action and status polling interval (https://github.com/ansible-collections/community.digitalocean/issues/194). +- digital_ocean_kubernetes - adding support for HA control plane (https://github.com/ansible-collections/community.digitalocean/issues/190). + +v1.12.0 +======= + +Minor Changes +------------- + +- digital_ocean_block_storage - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digital_ocean_database - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digital_ocean_domain - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digital_ocean_droplet - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digital_ocean_droplet - adding ability to apply and remove firewall by using droplet module (https://github.com/ansible-collections/community.digitalocean/issues/159). +- digital_ocean_droplet - require unique_name for state=absent to avoid unintentional droplet deletions. +- digital_ocean_firewall - inbound_rules and outbound_rules are no longer required for firewall removal (https://github.com/ansible-collections/community.digitalocean/issues/181). +- digital_ocean_floating_ip - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digital_ocean_floating_ip - adding attach and detach states to floating ip module (https://github.com/ansible-collections/community.digitalocean/issues/170). +- digital_ocean_load_balancer - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). +- digitalocean integration tests - adding integration tests for CDN Endpoints (https://github.com/ansible-collections/community.digitalocean/issues/179). + +Bugfixes +-------- + +- Update the tests so that they only run once (https://github.com/ansible-collections/community.digitalocean/issues/186). +- digital_ocean_droplet - fix resizing with C(state: active) does not actually turn Droplet on (https://github.com/ansible-collections/community.digitalocean/issues/140). +- digital_ocean_kubernetes - fix return value consistency (https://github.com/ansible-collections/community.digitalocean/issues/174). + +v1.11.0 +======= + +Minor Changes +------------- + +- digitalocean inventory script - add support for Droplet tag filtering (https://github.com/ansible-collections/community.digitalocean/issues/7). + +Bugfixes +-------- + +- Adding missing status badges for black and unit tests (https://github.com/ansible-collections/community.digitalocean/pull/164). +- Documentation URLs are fixed for the C(digital_ocean_domain_record) and C(digital_ocean_droplet_info) modules (https://github.com/ansible-collections/community.digitalocean/pull/163). +- Serializing the cloud integration tests (https://github.com/ansible-collections/community.digitalocean/pull/165). +- digital_ocean_floating_ip - make floating ip return data idempotent (https://github.com/ansible-collections/community.digitalocean/pull/162). +- digitalocean inventory - enforce the C(timeout) parameter (https://github.com/ansible-collections/community.digitalocean/issues/168). 
+ +v1.10.0 +======= + +Minor Changes +------------- + +- digital_ocean_kubernetes - adding the C(taints), C(auto_scale), C(min_nodes) and C(max_nodes) parameters to the C(node_pools) definition (https://github.com/ansible-collections/community.digitalocean/issues/157). + +Bugfixes +-------- + +- digital_ocean_block_storage - fix block volumes detach idempotency (https://github.com/ansible-collections/community.digitalocean/issues/149). +- digital_ocean_droplet - ensure "active" state before issuing "power on" action (https://github.com/ansible-collections/community.digitalocean/issues/150) +- digital_ocean_droplet - power on should poll/wait, resize should support "active" state (https://github.com/ansible-collections/community.digitalocean/pull/143). +- digital_ocean_load_balancer - C(droplet_ids) are not required when C(state=absent) is chosen (https://github.com/ansible-collections/community.digitalocean/pull/147). +- digital_ocean_load_balancer - when C(state=absent) is chosen the API returns an empty response (https://github.com/ansible-collections/community.digitalocean/pull/147). + +New Modules +----------- + +- digital_ocean_cdn_endpoints - Create and delete DigitalOcean CDN Endpoints +- digital_ocean_cdn_endpoints_info - Gather information about DigitalOcean CDN Endpoints +- digital_ocean_load_balancer - Manage DigitalOcean Load Balancers +- digital_ocean_monitoring_alerts - Create and delete DigitalOcean Monitoring alerts +- digital_ocean_monitoring_alerts_info - Gather information about DigitalOcean Monitoring alerts + +v1.9.0 +====== + +Minor Changes +------------- + +- digital_ocean - running and enforcing psf/black in the codebase (https://github.com/ansible-collections/community.digitalocean/issues/136). +- digital_ocean_floating_ip_info - new integration test for the `digital_ocean_floating_ip_info` module (https://github.com/ansible-collections/community.digitalocean/issues/130). + +Bugfixes +-------- + +- digital_ocean_database - increase the database creation integration test timeout (https://github.com/ansible-collections/community.digitalocean). +- digital_ocean_floating_ip - delete all Floating IPs initially during the integration test run (https://github.com/ansible-collections/community.digitalocean/issues/129). +- digitalocean inventory - respect the TRANSFORM_INVALID_GROUP_CHARS configuration setting (https://github.com/ansible-collections/community.digitalocean/pull/138). +- info modules - adding missing check mode support (https://github.com/ansible-collections/community.digitalocean/issues/139). + +v1.8.0 +====== + +Minor Changes +------------- + +- digital_ocean_database - add support for MongoDB (https://github.com/ansible-collections/community.digitalocean/issues/124). + +Bugfixes +-------- + +- digital_ocean - integration tests need community.general and jmespath (https://github.com/ansible-collections/community.digitalocean/issues/121). +- digital_ocean_firewall - fixed idempotence (https://github.com/ansible-collections/community.digitalocean/issues/122). + +v1.7.0 +====== + +Minor Changes +------------- + +- digital_ocean_kubernetes - set "latest" as the default version for new clusters (https://github.com/ansible-collections/community.digitalocean/issues/114). + +Bugfixes +-------- + +- digital_ocean_certificate - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). 
+- digital_ocean_droplet - state `present` with `wait` was not waiting (https://github.com/ansible-collections/community.digitalocean/issues/116). +- digital_ocean_firewall - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). +- digital_ocean_tag - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). +- digitalocean - update README.md with project_info and project module (https://github.com/ansible-collections/community.digitalocean/pull/112). + +New Modules +----------- + +- digital_ocean_snapshot - Create and delete DigitalOcean snapshots +- digital_ocean_vpc - Create and delete DigitalOcean VPCs +- digital_ocean_vpc_info - Gather information about DigitalOcean VPCs + +v1.6.0 +====== + +Bugfixes +-------- + +- digital_ocean_certificate_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). +- digital_ocean_domain_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). +- digital_ocean_firewall_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). +- digital_ocean_load_balancer_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). +- digital_ocean_tag_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). +- digitalocean inventory plugin - attributes available to filters are limited to explicitly required attributes and are prefixed with ``var_prefix`` (https://github.com/ansible-collections/community.digitalocean/pull/102). + +New Modules +----------- + +- digital_ocean_project - Manage a DigitalOcean project +- digital_ocean_project_info - Gather information about DigitalOcean Projects + +v1.5.1 +====== + +Bugfixes +-------- + +- digitalocean inventory plugin - Wire up advertised caching functionality (https://github.com/ansible-collections/community.digitalocean/pull/97). + +v1.5.0 +====== + +Minor Changes +------------- + +- digitalocean - Filter droplets in dynamic inventory plugin using arbitrary. jinja2 expressions (https://github.com/ansible-collections/community.digitalocean/pull/96). +- digitalocean - Support templates in API tokens when using the dynamic inventory plugin (https://github.com/ansible-collections/community.digitalocean/pull/98). + +Bugfixes +-------- + +- digital_ocean_database - Fixed DB attribute settings (https://github.com/ansible-collections/community.digitalocean/issues/94). +- digital_ocean_database_info - Cleanup unused attribs (https://github.com/ansible-collections/community.digitalocean/pulls/100). +- digital_ocean_snapshot_info - Fix lookup of snapshot_info by_id (https://github.com/ansible-collections/community.digitalocean/issues/92). +- digital_ocean_tag - Fix tag idempotency (https://github.com/ansible-collections/community.digitalocean/issues/61). + +v1.4.2 +====== + +Bugfixes +-------- + +- digital_ocean_droplet - Fixed Droplet inactive state (https://github.com/ansible-collections/community.digitalocean/pull/88). +- digital_ocean_sshkey - Fixed SSH Key Traceback Issue (https://github.com/ansible-collections/community.digitalocean/issues/68). + +v1.4.1 +====== + +Bugfixes +-------- + +- digital_ocean_droplet - Add integration tests for Droplet active and inactive states (https://github.com/ansible-collections/community.digitalocean/issues/66). 
+- digital_ocean_droplet - Fix Droplet inactive state (https://github.com/ansible-collections/community.digitalocean/issues/83). + +v1.4.0 +====== + +Bugfixes +-------- + +- digital_ocean_droplet_info - Fix documentation link for `digital_ocean_droplet_info` (https://github.com/ansible-collections/community.digitalocean/pull/81). +- digitalocean - Fix return docs for digital_ocean_sshkey_info (https://github.com/ansible-collections/community.digitalocean/issues/56). +- digitalocean - Update README.md for K8s and databases (https://github.com/ansible-collections/community.digitalocean/pull/80). + +New Modules +----------- + +- digital_ocean_droplet_info - Gather information about DigitalOcean Droplets + +v1.3.0 +====== + +New Modules +----------- + +- digital_ocean_database - Create and delete a DigitalOcean database +- digital_ocean_database_info - Gather information about DigitalOcean databases +- digital_ocean_kubernetes - Create and delete a DigitalOcean Kubernetes cluster +- digital_ocean_kubernetes_info - Returns information about an existing DigitalOcean Kubernetes cluster + +v1.2.0 +====== + +Minor Changes +------------- + +- digital_ocean - ``ssh_key_ids`` list entries are now validated to be strings (https://github.com/ansible-collections/community.digitalocean/issues/13). +- digital_ocean_droplet - ``ssh_keys``, ``tags``, and ``volumes`` list entries are now validated to be strings (https://github.com/ansible-collections/community.digitalocean/issues/13). +- digital_ocean_droplet - adding ``active`` and ``inactive`` states (https://github.com/ansible-collections/community.digitalocean/issues/23). +- digital_ocean_droplet - adds Droplet resize functionality (https://github.com/ansible-collections/community.digitalocean/issues/4). + +Bugfixes +-------- + +- digital_ocean inventory script - fail cleaner on invalid ``HOST`` argument to ``--host`` option (https://github.com/ansible-collections/community.digitalocean/pull/44). +- digital_ocean inventory script - implement unimplemented ``use_private_network`` option and register missing ``do_ip_address``, ``do_private_ip_address`` host vars (https://github.com/ansible-collections/community.digitalocean/pull/45/files). +- digital_ocean inventory script - return JSON consistent with specification with ``--host`` (https://github.com/ansible-collections/community.digitalocean/pull/44). +- digital_ocean_domain - return zone records when creating a new zone (https://github.com/ansible-collections/community.digitalocean/issues/46). +- digital_ocean_droplet - add missing ``required=True`` on ``do_oauth_token`` in ``argument_spec`` (https://github.com/ansible-collections/community.digitalocean/issues/13). +- digital_ocean_floating_ip - fixes idempotence (https://github.com/ansible-collections/community.digitalocean/issues/5). + +New Modules +----------- + +- digital_ocean_balance_info - Display DigitalOcean customer balance + +v1.1.1 +====== + +Bugfixes +-------- + +- digitalocean - Drop collection version from README.md (https://github.com/ansible-collections/community.digitalocean/issues/63). + +v1.1.0 +====== + +Minor Changes +------------- + +- digital_ocean_block_storage - included ability to resize Block Storage Volumes (https://github.com/ansible-collections/community.digitalocean/issues/38). + +Bugfixes +-------- + +- digital_ocean_certificate_info - fix retrieving certificate by ID (https://github.com/ansible-collections/community.digitalocean/issues/35). 
+- digital_ocean_domain - module is now idempotent when called without IP (https://github.com/ansible-collections/community.digitalocean/issues/21). +- digital_ocean_load_balancer_info - fix retrieving load balancer by ID (https://github.com/ansible-collections/community.digitalocean/issues/35). + +New Plugins +----------- + +Inventory +~~~~~~~~~ + +- digitalocean - DigitalOcean Inventory Plugin + +New Modules +----------- + +- digital_ocean_domain_record - Manage DigitalOcean domain records +- digital_ocean_firewall - Manage cloud firewalls within DigitalOcean + +v1.0.0 +====== + +Bugfixes +-------- + +- Sanity test documentation fixes (https://github.com/ansible-collections/community.digitalocean/pull/3). +- Update docs examples to use FQCN (https://github.com/ansible-collections/community.digitalocean/issues/14). + +v0.1.0 +====== + +Release Summary +--------------- + +Initial release of the collection after extracing the modules from `community.general <https://github.com/ansible-collections/community.general/>`_. diff --git a/ansible_collections/community/digitalocean/COPYING b/ansible_collections/community/digitalocean/COPYING new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/ansible_collections/community/digitalocean/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/community/digitalocean/FILES.json b/ansible_collections/community/digitalocean/FILES.json new file mode 100644 index 00000000..00d671b0 --- /dev/null +++ b/ansible_collections/community/digitalocean/FILES.json @@ -0,0 +1,2371 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ansible-test-integration.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a67c8b8bd855984828a5dff70eeede2fb375ce727c4b762eb058f5afa74cbb78", + "format": 1 + }, + { + "name": ".github/workflows/ansible-test-sanity.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d389347280fa81421b17f97ad4b1d9bad54e223e54b81ab875bbbb52590add35", + "format": 1 + }, + { + "name": ".github/workflows/ansible-test-unit.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0284cac0a096014cd45aca0a6cd9a8a48d378017e88b56b3f818b2320d419a70", + "format": 1 + }, + { + "name": ".github/workflows/black.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "561aecfa4d554c62e1a4a054807e70b8a5fdff72cfb36dfe9327379959068545", + "format": 1 + }, + { + "name": ".github/workflows/pull-request-integration.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "798a09115f8346b3bb044a5952f856b7f7e604fc6876ef2d62fba809f6aa0def", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "changelogs/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbdbadc2141b0d3aef5586c1e954d5ad3275c55477c78718c7bd8991379c4f69", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b07ba10f6c28a1fb05c5b0e7193bf5dfcb778dc2c003a98595d73e64fc7f916", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2432e09dce34a1cfc37b5ac852390a4ee5c773bc91fafc4c4d07d557c7c77c7c", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/digital_ocean.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d46c48309bd83fb6d9517fe348b0f79abed2c9936a745c8b93500bbb50ad1ab5", + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/inventory/digitalocean.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b52a388bd444cb45d380369faf380aa9c5f9459d8db6aac3e2f1642539363a27", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/digital_ocean.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1185211388225f955a5790fa43b7f6d58e81309a279f64bfa70ce78c68fed4d", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_account_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8b4274d9a8a35db8b50c2279c2f767981c18d4f880e1ad513376c11d583d132", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_certificate_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ae2a95406c23e45316f6eee2a77464cfa5416811dbe0d7ebfb2de1cd7ba6a83", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_domain_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fe8792781f62917f16ac9c37e753fb92b0a1794ad93a2168716089aacfc576d", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_firewall_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff6863d0b9479612fb8a17233175be7de7dd6dc7f565480b732aadfbf5c41374", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_floating_ip_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea7cd06fc9a2972cd088ea3a9ad892eec50f03e3d00e7abe2b60b0bd9c3ac899", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_image_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afee0ba7cd2175b3b8587ff416bb144fab16a4462497b774e6436f104140c587", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_load_balancer_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c737dfa66099c3dcc0910943bbb24dfc8cd64642bacbda9414cf1f9bca70ddf", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_region_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1132eda930d5d877565db549e7077129663e7a2c4075aaa5c0bda70ff1869d50", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_size_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac563bbad59ba84b8229e9bc6ecb26ca2d82a293928657ecd5471763ebb9cd18", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_snapshot_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbe853e6f601cf6a8d219051bd71289f38b663558dd910c578c2a550835e37e2", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_sshkey_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a568a1c2cfaf55d1ee0923cf83120c897c85cf185ba463dcb29e66c96d6435f6", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_tag_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a4e16316df146fd9c06637d78fe75e82b8c4d030a5ad8a437b594387141ff64", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_volume_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83a2a92a6218b5bab9bb6c562c4aabd72fab85b34ff7f605e49a4644ee374b29", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "4321ebab46ec5f699aabe4b29e713d79b923bb8a4fcd7c9397408f9f81b4dbdc", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_account_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8b4274d9a8a35db8b50c2279c2f767981c18d4f880e1ad513376c11d583d132", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_balance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3f17516fe7b562e2096fc1a34e175133ccc1d4e48098cfe85a05ad07e2b4d0d", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_block_storage.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47750f27efd168f758b5fd454471aa0d0e0dac284f23982c6f0626b4f40f81a2", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_cdn_endpoints.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dba98b5eca51961dbb7ed850f89735b46f17d06e56059134bde982fc43c3d84", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_cdn_endpoints_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "598f08b64245ad33eb6879e93d61e7b33536742dc86efc21f6487125381fcd4d", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0050cef948a3ceee734a9a051bca063772dc2bed71b5c106bae79cc53b1a44b1", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_certificate_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ae2a95406c23e45316f6eee2a77464cfa5416811dbe0d7ebfb2de1cd7ba6a83", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_database.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72c503eb70525affe7662637419f2e0594546e2a9c3041cdf99421e59ceaa3e8", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_database_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6a9f1fb48b0f93d84a5bf0868a23ea88fd34d423a8a4825e2ade8621e96a6d7", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e54c54dabab4d90b9e3dfe2fd9320944b8965b19f619270e4deb3c05a9a130cd", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_domain_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fe8792781f62917f16ac9c37e753fb92b0a1794ad93a2168716089aacfc576d", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_domain_record.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "296147c5b23385199f2e9955f884f2ffd7568f600737a2f575536e9824e6bf9b", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_domain_record_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fab0194b7a74c527438e89c8fa7801dbb9532a32f5d815d529eb9be2fb965162", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_droplet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a12ddb9dd84857a50562ec74633dd698cdc834d5b134db1edbf9efaebb55b830", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_droplet_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "428938d5080a9fda0e69737a94c6aeaf132aa5c67d380252e4cbfba7a50243b6", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_firewall.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3019358bce84b27244f75e6d0fc70186c8f3bec143a06f996320edf7155fb3b1", + "format": 1 + }, + { + 
"name": "plugins/modules/digital_ocean_firewall_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff6863d0b9479612fb8a17233175be7de7dd6dc7f565480b732aadfbf5c41374", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_floating_ip.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3fbbad1f805bbe44ea062c2f7751efa0e27696ebaf66d31c04f437ffb0e12d9", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_floating_ip_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea7cd06fc9a2972cd088ea3a9ad892eec50f03e3d00e7abe2b60b0bd9c3ac899", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_image_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afee0ba7cd2175b3b8587ff416bb144fab16a4462497b774e6436f104140c587", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_kubernetes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8288f64dcbc7fbdf3da017fe32cafb9c8068f8dca53538fca7882377695a3a78", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_kubernetes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "967ae0016638341fccc56dd1145cbf868dc41a37e02ce214b16d3fede5be56a4", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_load_balancer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75abb34818ff0265136d898f205a7b02195e529d93232ac898d9a8fc49824ea1", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_load_balancer_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c737dfa66099c3dcc0910943bbb24dfc8cd64642bacbda9414cf1f9bca70ddf", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_monitoring_alerts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66a2d2ade7c6c6d58f5add506d5deb16b41d4e718315a16c0d48d67032e28af8", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_monitoring_alerts_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e291415d6d626258458ea42a32b533e51c5f4dd1f56c18ec20c4e77cebd20689", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_project.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa446935175c0b6d7ac4e97a9ed865302d73f89561432a6eca92859e0595cd15", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_project_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80d2c5131ea4250e4700a0e991f270e08cc314f3e9d5ce1d3d6abbc5b035c7b3", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_region_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1132eda930d5d877565db549e7077129663e7a2c4075aaa5c0bda70ff1869d50", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_size_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac563bbad59ba84b8229e9bc6ecb26ca2d82a293928657ecd5471763ebb9cd18", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0d3daa6dbd393e3cf1b5fad59ee5c212de50a572c98af9ccd7a7305a609f0be", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_snapshot_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbe853e6f601cf6a8d219051bd71289f38b663558dd910c578c2a550835e37e2", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_spaces.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "8d2460847d8b52a31298f356b156f8498fa438e6e2046168486966b4d82e4cd6", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_spaces_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7b72a7b0aff56fdde7852f985b9357d1ca0bf1b96c3fa377f18016d9f286c00", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_sshkey.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e2c3485ffac260810929615b2f31ff9aff1a694963dda371ffd90a2a150ac47", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_sshkey_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a568a1c2cfaf55d1ee0923cf83120c897c85cf185ba463dcb29e66c96d6435f6", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_tag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c084a95dd60c1f9530b2f1e9ce572473f6ff2092fc1fc87174f4d8fe012289d5", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_tag_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a4e16316df146fd9c06637d78fe75e82b8c4d030a5ad8a437b594387141ff64", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_volume_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83a2a92a6218b5bab9bb6c562c4aabd72fab85b34ff7f605e49a4644ee374b29", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_vpc.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3770b51fa942f01f83ec76515fb970e0c18e3f6e470f02bdfc3eb710bf0f198e", + "format": 1 + }, + { + "name": "plugins/modules/digital_ocean_vpc_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f413316213e0f87be397786e3dd9ec0c3e10a918ae728606eaba0cc2d6027a6e", + "format": 1 + }, + { + "name": "scripts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "scripts/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "scripts/inventory/digital_ocean.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e4ba7e652f9e7d46868d32bf08047bd209745e72d6fd3599665c15c013c77a9", + "format": 1 + }, + { + "name": "scripts/inventory/digital_ocean.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2e479be0c22ec57b873b2503b930a2b0d2198e34a6b812689f1bcf59dc5e7b5", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_account_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_account_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_account_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fbc3c7fa048c45c1959fa9e21442762b395dd787674e3dcf0239b3ec3eee957", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_account_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + 
"format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a886b6634b7c7c08e9cd83bf6a113800ea8b27c9edc4df47c9d9ce5630430da", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_balance_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e49ef1125342347a96e74f0657a6209c696aa26b06348ab7b3394b3d85098a0e", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00baf1f1cfe6c9465a45d20843dab79dd37fff372c3dcb6a5b3d62f7eebd2cb1", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_block_storage/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ff39e0322373e8f528222ae121816994c5b589475d8724421b11a587492a883", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87e44cb5ec915a77c895bbcc986c6e5eea1f98ff3104c976034624389c6452b1", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b97150f66d5200bca798569c3e90dac5f65c225b8ae520163d63763e1a26c38", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_cdn_endpoints_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "764affaa42ccb4279b28791b95e4ff79392ac25ea27fd0d57a44e9e8f25507a6", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ddf38ed40555131b321f7fb6b1c76d6ce0624b9a7a3adb6051641ce2f40e7a7", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87a4c9a04a4809f0b9f7140757a43268dea56dcefae761bf3dd30408e408bd28", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_certificate_info/aliases", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc10b186fce310bd7e9113ceb335ec40653d15d95c8f54b1092c1f8f5c35826d", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "662c329f90410940e3614e87ba39603a893d66390e425164f390ad945dba3a10", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68b6a182a16a00b3b46007f108c80a8e61cfe292860d66f7a53536ea1d2ba5a8", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_database_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73bff90365210f2367cc2ef3970a42c33ecfa1dd927aa4e1d9645736dcabfe0b", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd910b1da220db012f55762328086626adc538caccd2f144441fb826688d31e2", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain/aliases", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8a0736065ff0da261f1a04ff12759f1d66019259356606ef5247c2eadf1ba25", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "abf8231154053d4c5f7fd5cdf05af47fa9aeb2205bfd4f01bfe390fb9340baab", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61af086dffe34ac7ea26d2f2ededaa450a56e98087d3fbd040bb8f275a3be216", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_domain_record_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b03756c08514bcfd0372763442625c178e630147a4a6a2e43d2c0aae4db499cf", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e77539ee7523840b7eba12bdd43ae17be38f57b3adf57a17f8208b2108c8fd5a", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet/aliases", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e782ba54577d1e3b4bf63e5a7faa98c7aa058162b0c5acaaaf5449d746e1cde", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_droplet_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4aa47098d194c8ec9a33ebc422fac18e95f6b344a60eefc7c0e7a412130fc67", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5dcd18de11e724fcdc652d5b2b6561e4b8d0536b2141261d02902d6589cd67a", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "563fa59e2f6b1e9efa8fc599b61cee6cf7043b64ad80423e86c5ee2b734a71ee", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_firewall_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1dd07d9bf81cb7c5c370f4e62a241084ec1040ce01044ec72118e557953a266", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89fcaefbf91e5e6ba04774b00a5148e2f0b55ae0c9ab67d2cb33f6c0295941c0", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf5a05fb83861c99fb28a46b3cbc5b50079a00262c6ea8a4b1b2e3bd405d4828", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d422415e023b3a2f93d322c4fb1f8f7e1f8de7558a496662642c1e75c32516de", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_floating_ip_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d908407f540095fdc373b16c250678e7de8055c8b4e7171d64d40aff5aa69df", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c94255fc4ecd3d190bb2af934588a565cd307f0b932305342a267e17ef2a2391", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_image_info/aliases", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf1d0849c5cdf4052d15c85b8788a6e22bef9a9518669819dd4cba0667aa6816", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f90019f9e34bee677ec10cae36f7be0e2a70f75fdff49224ad704f2aac3cbafe", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e92416aeca8878d799bc64c4d0b4f78d244ef9b6f0a0ed31a16d154490f47f7", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18d5cfc3d9ecd699870b2748f5f3f9184ba3555a4e3fe4233d4899e1db47a254", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_kubernetes_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48c408854bad4e9d44ac5241ab75d42719d718f188530bd6b1f5c3a88e99b4fb", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "136d7121482f84974f4ad2bdcf5ca0c4d81396735e96baf19547b36803ca7611", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2bab35f1ac0bd8ffaab9c7376a71c3c5b0f5a40ef276bf0ec9b6adc3555282b8", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_load_balancer_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ccf1c2e066d3c59914cc39c49c1db694da5713db419b24e5dbc4f82bd5c9fd8", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09817951c2b159b72403fba005214047ca3f65764bf43651c27df43eb150ebfc", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_monitoring_alerts/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d342733152f6b4bccf5c6540d8c13d3b90b2b8733eaa17ca932a9005e6749e19", + "format": 1 + }, + { + "name": 
"tests/integration/targets/digital_ocean_project/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6920b07f4dea085ead73d0c26d06f50102104500a5478f658b725a7efcf9ab2", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_project_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6d660d72bea0b1a4831c820769f9dd8285a1b329c900d7f9cbc1774f6d70480", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_region_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_size_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_size_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_size_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_size_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_size_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "086aa0838e94e34ba45b8703a84312621445a48c9d69cd9eac36e645a657221e", + "format": 1 + }, + { + "name": 
"tests/integration/targets/digital_ocean_size_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44603460d09ae29c96ecbfac932b961f8183ce677dec478b2ff89843acbd29ff", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca03b9aece82504a35369347290c1804d8b27bc0a99f68424dd69c831bd2858c", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08606337f8cd8591eebdb9ea243ee267a302f2547c68fa6b4cdd05a86d3dd359", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcf01fb7eca3dc2d724cee5dac70b600230259f6b9ff1e06e6206bf19704bd1f", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_snapshot_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71e4ecc164beb62f9d8213d3f00d4e52427856adabccee00ee1d3043e2d0d817", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60ee4eb207bd14fb364dd100694f4e670b8742c1377270a5426bde25e5090cb7", + "format": 1 + }, + { + "name": 
"tests/integration/targets/digital_ocean_spaces/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "220697949ba3a588678d0998217246d24e59d7a6b44d44b3eaccb300868becb4", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_spaces_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae8a0fffccfac884a13a92d7873dba516f78889b3cb3c6824b63b3bea7770f0e", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04dc648f008243f1f525352208fe76079f88c2687d4e832625339e95be42f03c", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_sshkey_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7be10e969f96b28985d4cb0ac34d81edf2cd0d6dad11dae44099148e6d1a0748", + "format": 1 + }, + { + "name": 
"tests/integration/targets/digital_ocean_sshkey_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b51bf22b8e164227935585dd7abf7d3d69615b730d9dd2cf03246fa6548501af", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67aa9979516a71f3abe31937cb557d81c836800292bb9cba086dfbb78b298bc4", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8124154b3251fb1fbbd0ea84068a774048c02116dcdf1ae34565cbe16f5bd53c", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_tag_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ae21aa9bb287bacf4e5e04edf8821b7d3d9398a038224be8127e434da068598", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_volume_info/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f9d79dfcc23af0cac86e6a8831faea8cf820838804c4d36c34959ec9bf9c0751", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54c0709fb77160b369a55366e932447dc75748c40f2670c0ec7736f2b196a9ca", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f644054f10f0ca5259aca9e8a8064fa8db0312ab1f4ce55688e1cadc3a81fe1b", + "format": 1 + }, + { + "name": "tests/integration/targets/digital_ocean_vpc_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/integration_config.yml.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88f1bcc8df7d52e95b740e8ebf99105c027d6172df212cf2cf999906863f9d61", + "format": 1 + }, + { + "name": "tests/integration/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56da4cacb3861cbc273cb080d600d40b078dd9f9ce32311b5519617a4c70e3e7", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "250bc5a4e41cc5073f30ba4a9272485982a48e9b3027a2db3896f62a93b0666e", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03f60ac3fee853074bdbf092860e9754cc8a02b929ba0408dce2fce673e8a4f6", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "798cff6c8c2b0cf6db97c96e06a8dafd21396145a1a56e50549a3d7705481be1", + 
"format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory/test_digitalocean.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eabb2b93dc2a351d1c811c27c9c1909065f2a1087ab3e485f50fcde81fe2ee1e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_digital_ocean_kubernetes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca626d4d4b019b0b1c746c9cbcd374b705abdd6ab48ded438603e557d08ee573", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_digital_ocean_kubernetes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9a17ffb06325115ff4a9a0c27e49a061d36ffc28e9c58f307624da17702d23f", + "format": 1 + }, + { + "name": "tests/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/render.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28ff6a30a12bbe965b5258aaa33c8d96d5210b3e9f062d38433167ecc6252dca", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f59a2bf97befc68fb5a72a0aae195a9a95e1633d28fb595fec3a559ec04761ca", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "790f9e88958fdd42bc276769f78e6cd0c147800734dcf156c8132ea57e758338", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf4814eaf041b96803d02aae15adc445979c51e802a3e658f1be5132fae2e38f", + "format": 1 + }, + { + "name": "codecov.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08263e731c1a39f4d143b80830250fa028ec6eeca4bdb9763bb3d3aed70cf076", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/community/digitalocean/MANIFEST.json b/ansible_collections/community/digitalocean/MANIFEST.json new file mode 100644 index 00000000..1f207864 --- /dev/null +++ b/ansible_collections/community/digitalocean/MANIFEST.json @@ -0,0 +1,61 @@ +{ + "collection_info": { + "namespace": "community", + "name": "digitalocean", + "version": "1.23.0", + "authors": [ + "Ansible (https://github.com/ansible)", + "BondAnthony (https://github.com/BondAnthony)", + "Akasurde (https://github.com/Akasurde)", + "pmarques (https://github.com/pmarques)", + "geerlingguy (https://www.jeffgeerling.com/)", + "Andres Hermosilla (https://github.com/rezen)", + "Luis (https://github.com/lalvarezguillen)", + "grzs (https://github.com/grzs)", + "Lucas Basquerotto (https://github.com/lucasbasquerotto)", + "Tadej Borov\u0161ak (https://github.com/tadeboro)", + "Mark Mercado (https://github.com/mamercad)", + "Mike Pontillo (https://github.com/mpontillo)", + "Felix Fontein (https://github.com/felixfontein)", + "Andrew Starr-Bochicchio (https://github.com/andrewsomething)", + "Sam Pinkus (https://github.com/sgpinkus)", + "Luis (https://github.com/lalvarezguillen)", + "John R Barker (https://github.com/gundalow)", + "Andrew Klychkov (https://github.com/Andersson007)", + "Tyler Auerbeck (https://github.com/tylerauerbeck)", + "Angel Aviel Domaoan (https://github.com/tenshiAMD)", + "Max Truxa (https://github.com/maxtruxa)", + "Franco Posa (https://github.com/francoposa)", + "magicrobotmonkey (https://github.com/magicrobotmonkey)", + "radioactive73 (https://github.com/radioactive73)", + "danxg87 (https://github.com/danxg87)", + "Sviatoslav Sydorenko (https://github.com/webknjaz)", + "Vitaly Khabarov (https://github.com/vitkhab)", + "Onur G\u00fczel (https://github.com/onurguzel)", + "Shuaib Munshi (https://github.com/shuaibmunshi)", + "Corey Wright (https://github.com/coreywright)" + ], + "readme": "README.md", + "tags": [ + "digitalocean", + "cloud", + "droplet" + ], + "description": "DigitalOcean Ansible Collection.", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/community.digitalocean", + "documentation": "https://docs.ansible.com/ansible/latest/collections/community/digitalocean/", + "homepage": "https://github.com/ansible-collections/community.digitalocean", + "issues": "https://github.com/ansible-collections/community.digitalocean/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73859de3a30b2cdcb242f4b496ef31c47fc8d36f64646d3149622c7738d1929f", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/community/digitalocean/README.md b/ansible_collections/community/digitalocean/README.md new file mode 100644 index 00000000..a53f9cd1 --- /dev/null +++ b/ansible_collections/community/digitalocean/README.md @@ -0,0 +1,254 @@ +# DigitalOcean Community Collection + +[![coverage](https://img.shields.io/codecov/c/github/ansible-collections/community.digitalocean)](https://codecov.io/gh/ansible-collections/community.digitalocean) +[![black](https://github.com/ansible-collections/community.digitalocean/actions/workflows/black.yml/badge.svg)](https://github.com/ansible-collections/community.digitalocean/actions/workflows/black.yml) +[![integration](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-integration.yml/badge.svg)](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-integration.yml) +[![sanity](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-sanity.yml/badge.svg)](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-sanity.yml) +[![unit](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-unit.yml/badge.svg)](https://github.com/ansible-collections/community.digitalocean/actions/workflows/ansible-test-unit.yml) + +This collection contains modules and plugins to assist in automating [DigitalOcean](https://www.digitalocean.com) infrastructure and API interactions with Ansible. + +## Included content + +- [digital_ocean](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_module.html) – Create/delete a droplet/SSH_key in DigitalOcean +- [digital_ocean_account_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_account_facts_module.html) – Gather information about DigitalOcean User account +- [digital_ocean_account_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_account_info_module.html) – Gather information about DigitalOcean User account +- [digital_ocean_balance_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_balance_info_module.html) – Display DigitalOcean customer balance +- [digital_ocean_block_storage](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_block_storage_module.html) – Create/destroy or attach/detach Block Storage volumes in DigitalOcean +- [digital_ocean_cdn_endpoints_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_cdn_endpoints_info_module.html) – Gather information about DigitalOcean CDN Endpoints +- [digital_ocean_cdn_endpoints](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_cdn_endpoints_module.html) – Create and delete DigitalOcean CDN Endpoints +- [digital_ocean_certificate](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_certificate_module.html) – Manage certificates in DigitalOcean. 
+- [digital_ocean_certificate_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_certificate_facts_module.html) – Gather information about DigitalOcean certificates +- [digital_ocean_certificate_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_certificate_info_module.html) – Gather information about DigitalOcean certificates +- [digital_ocean_database_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_database_info_module.html) – Gather information about DigitalOcean databases +- [digital_ocean_database](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_database_module.html) – Create and delete DigitalOcean databases +- [digital_ocean_domain](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_module.html) – Create/delete a DNS domain in DigitalOcean +- [digital_ocean_domain_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_facts_module.html) – Gather information about DigitalOcean Domains +- [digital_ocean_domain_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_info_module.html) – Gather information about DigitalOcean Domains +- [digital_ocean_domain_record_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_record_info_module.html) – Gather information about DigitalOcean domain records +- [digital_ocean_domain_record](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_record_module.html) – Create and delete DigitalOcean Domain Records +- [digital_ocean_droplet](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_droplet_module.html) – Create and delete a DigitalOcean droplet +- [digital_ocean_droplet_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_droplet_info_module.html) - Gather information about DigitalOcean Droplets +- [digital_ocean_firewall](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_firewall_module.html) – Create and delete DigitalOcean firewalls +- [digital_ocean_firewall_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_firewall_facts_module.html) – Gather information about DigitalOcean firewalls +- [digital_ocean_firewall_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_firewall_info_module.html) – Gather information about DigitalOcean firewalls +- [digital_ocean_floating_ip](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_floating_ip_module.html) – Manage DigitalOcean Floating IPs +- [digital_ocean_floating_ip_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_floating_ip_facts_module.html) – DigitalOcean Floating IPs information +- [digital_ocean_floating_ip_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_floating_ip_info_module.html) – DigitalOcean Floating IPs information +- [digital_ocean_image_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_image_facts_module.html) – Gather information about DigitalOcean images +- 
[digital_ocean_image_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_image_info_module.html) – Gather information about DigitalOcean images +- [digital_ocean_kubernetes_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_kubernetes_info_module.html) – Gather information about DigitalOcean Kubernetes clusters +- [digital_ocean_kubernetes](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_kubernetes_module.html) – Create and delete DigitalOcean Kubernetes clusters +- [digital_ocean_load_balancer](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_load_balancer_module.html) – Create and delete DigitalOcean load balancers +- [digital_ocean_load_balancer_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_load_balancer_facts_module.html) – Gather information about DigitalOcean load balancers +- [digital_ocean_load_balancer_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_load_balancer_info_module.html) – Gather information about DigitalOcean load balancers +- [digital_ocean_monitoring_alerts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_monitoring_alerts_module.html) – Create and delete DigitalOcean Monitoring alerts +- [digital_ocean_monitoring_alerts_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_monitoring_alerts_info_module.html) – Gather information about DigitalOcean Monitoring alerts +- [digital_ocean_project_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_project_info_module.html) – Gather information about DigitalOcean projects +- [digital_ocean_project](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_project_module.html) – Manage DigitalOcean projects +- [digital_ocean_region_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_region_facts_module.html) – Gather information about DigitalOcean regions +- [digital_ocean_region_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_region_info_module.html) – Gather information about DigitalOcean regions +- [digital_ocean_size_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_size_facts_module.html) – Gather information about DigitalOcean Droplet sizes +- [digital_ocean_size_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_size_info_module.html) – Gather information about DigitalOcean Droplet sizes +- [digital_ocean_snapshot_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_snapshot_facts_module.html) – Gather information about DigitalOcean Snapshots +- [digital_ocean_snapshot_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_snapshot_info_module.html) – Gather information about DigitalOcean Snapshots +- [digital_ocean_snapshot](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_snapshot_module.html) – Manage DigitalOcean Snapshots +- [digital_ocean_spaces_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_spaces_info_module.html) – Gather information about 
DigitalOcean Spaces +- [digital_ocean_spaces](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_spaces_module.html) – Manage DigitalOcean Spaces +- [digital_ocean_sshkey](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_sshkey_module.html) – Manage DigitalOcean SSH keys +- [digital_ocean_sshkey_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_sshkey_facts_module.html) – DigitalOcean SSH keys facts +- [digital_ocean_sshkey_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_sshkey_info_module.html) – Gather information about DigitalOcean SSH keys +- [digital_ocean_tag](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_tag_module.html) – Create and remove tag(s) to DigitalOcean resource. +- [digital_ocean_tag_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_tag_facts_module.html) – Gather information about DigitalOcean tags +- [digital_ocean_tag_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_tag_info_module.html) – Gather information about DigitalOcean tags +- [digital_ocean_volume_facts](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_volume_facts_module.html) – Gather information about DigitalOcean volumes +- [digital_ocean_volume_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_volume_info_module.html) – Gather information about DigitalOcean volumes +- [digital_ocean_vpc](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_vpc_module.html) – Create and delete DigitalOcean VPCs +- [digital_ocean_vpc_info](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_vpc_info_module.html) – Gather information about DigitalOcean VPCs +- [digitalocean](https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digitalocean_inventory.html) – DigitalOcean Inventory Plugin + +## Installation and Usage + +### Requirements + +The collection is tested and supported with: + +- ansible >= 2.9.10 or ansible-core >= 2.11 (as well as the [devel branch](https://github.com/ansible/ansible)) +- python >= 3.6 + +### Installing the Collection from Ansible Galaxy + +Before using the DigitalOcean collection, you need to install it with the Ansible Galaxy CLI: + + ansible-galaxy collection install community.digitalocean + +You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: community.digitalocean +``` + +### Using modules from the DigitalOcean Collection in your playbooks + +It's preferable to use content in this collection using their Fully Qualified Collection Namespace (FQCN), for example `community.digitalocean.digital_ocean_droplet`: + +```yaml +--- +- hosts: localhost + gather_facts: false + connection: local + + vars: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + + # You can also default the value of a variable for every DO module using module_defaults + # module_defaults: + # group/community.digitalocean.all: + # oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + + tasks: + - name: Create SSH key + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ 
oauth_token }}" + name: mykey + ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example" + state: present + register: my_ssh_key + + - name: Create a new Droplet + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ oauth_token }}" + state: present + name: mydroplet + unique_name: true + size: s-1vcpu-1gb + region: sfo3 + image: ubuntu-20-04-x64 + wait_timeout: 500 + ssh_keys: + - "{{ my_ssh_key.data.ssh_key.id }}" + register: my_droplet + + - name: Show Droplet info + ansible.builtin.debug: + msg: | + Droplet ID is {{ my_droplet.data.droplet.id }} + First Public IPv4 is {{ (my_droplet.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address | default('<none>', true) }} + First Private IPv4 is {{ (my_droplet.data.droplet.networks.v4 | selectattr('type', 'equalto', 'private')).0.ip_address | default('<none>', true) }} + + - name: Tag a resource; creating the tag if it does not exist + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ oauth_token }}" + name: "{{ item }}" + resource_id: "{{ my_droplet.data.droplet.id }}" + state: present + loop: + - staging + - dbserver +``` + +If upgrading older playbooks which were built prior to Ansible 2.10 and this collection's existence, you can also define `collections` in your play and refer to this collection's modules as you did in Ansible 2.9 and below, as in this example: + +```yaml +--- +- hosts: localhost + gather_facts: false + connection: local + + collections: + - community.digitalocean + + tasks: + - name: Create ssh key + digital_ocean_sshkey: + oauth_token: "{{ oauth_token }}" + ... +``` + +## Testing and Development + +If you want to develop new content for this collection or improve what's already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there. + +Alternatively, to develop completely out of `~/src/ansible-dev`, one could: + + mkdir -p ~/src/ansible-dev + cd ~/src/ansible-dev + python3 -m venv venv + source venv/bin/activate + git clone https://github.com/ansible/ansible.git + pip install --requirement ansible/requirements.txt + pip install kubernetes + source ansible/hacking/env-setup + export ANSIBLE_COLLECTIONS_PATHS="~/src/ansible-dev/ansible_collections" + ansible-galaxy collection install community.digitalocean community.general + +This gives us a self-contained environment in `~/src/ansible-dev` consisting of Python, Ansible, and this collection (located in `~/src/ansible-dev/ansible_collections/community/digitalocean`). +This collection requires functionality from `community.general`, and as such, we install it as well. + +If you would like to contribute any changes which you have made to the collection, you will have to push them to your fork. +If you do not have a fork yet, you can create one [here](https://github.com/ansible-collections/community.digitalocean/fork). +Once you have a fork: + + cd ~/src/ansible-dev/ansible_collections/community/digitalocean + git remote add origin git@github.com:{your fork organization}/community.digitalocean.git + git checkout -b my-awesome-fixes + git commit -am "My awesome fixes" + git push -u origin my-awesome-fixes + +Now, you should be ready to create a [Pull Request](https://github.com/ansible-collections/community.digitalocean/pulls). 
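+
+The integration tests described in the next section create real DigitalOcean (and Spaces) resources, so they need credentials before they will run. As a minimal sketch, assuming you already have a DigitalOcean API token and a Spaces access key pair at hand, the `tests/integration/integration_config.yml` file (the variable names are documented in the note in the next section) could look like the following, with placeholder values only:
+
+```yaml
+---
+do_api_key: DIGITALOCEAN_API_TOKEN_PLACEHOLDER
+aws_access_key_id: SPACES_ACCESS_KEY_ID_PLACEHOLDER
+aws_secret_access_key: SPACES_SECRET_ACCESS_KEY_PLACEHOLDER
+```
+
+With that file in place, a single integration target (a directory under `tests/integration/targets`) can also be run on its own, for example `ansible-test integration --venv --python 3.9 digital_ocean_tag`.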
+ +### Testing with `ansible-test` + +The `tests` directory inside the collection root contains configuration for +running unit, sanity, and integration tests using [`ansible-test`](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html). + +You can run the collection's test suites with the commands: + + ansible-test units --venv --python 3.9 + ansible-test sanity --venv --python 3.9 + ansible-test integration --venv --python 3.9 + +Replace `--venv` with `--docker` if you'd like to use Docker for the testing runtime environment. + +Note: To run integration tests, you must add an [`tests/integration/integration_config.yml`](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html#integration-config-yml) file with a valid DigitalOcean API Key (variable `do_api_key`), +AWS Access ID and Secret Key (variables `aws_access_key_id` and `aws_secret_access_key`, respectively). The AWS +variables are used for the DigitalOcean Spaces and CDN Endpoints integration tests. + +## Release notes + +See the [changelog](https://github.com/ansible-collections/community.digitalocean/blob/main/CHANGELOG.rst). + +### Release process + +Releases are automatically built and pushed to Ansible Galaxy for any new tag. Before tagging a release, make sure to do the following: + + 1. Update `galaxy.yml` and this README's `requirements.yml` example with the new `version` for the collection. Make sure all new modules have references above. + 1. Update the CHANGELOG: + 1. Make sure you have [`antsibull-changelog`](https://pypi.org/project/antsibull-changelog/) installed. + 1. Make sure there are fragments for all known changes in `changelogs/fragments`. + 1. Run `antsibull-changelog release`. + 1. Don't forget to add new folks to `galaxy.yml`. + 1. Commit the changes and create a PR with the changes. Wait for tests to pass, then merge it once they have. + 1. Tag the version in Git and push to GitHub. + 1. Determine the next version (collections follow [semver](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections.html#collection-versions) semantics) by listing tags or looking at the [releases](https://github.com/ansible-collections/community.digitalocean/releases). + 1. List tags with `git tag --list` + 1. Create a new tag with `git tag 1.2.3` + 1. Push tags upstream with `git push upstream --tags` + +After the version is published, verify it exists on the [DigitalOcean Collection Galaxy page](https://galaxy.ansible.com/community/digitalocean). + +## More information + + - [DigitalOcean Working Group](https://github.com/ansible/community/wiki/Digital-Ocean) + - [Ansible Collection overview](https://github.com/ansible-collections/overview) + - [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) + - [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) + - [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) + +## Licensing + +GNU General Public License v3.0 or later. + +See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. 
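+
+### Example changelog fragment
+
+The release process above expects a changelog fragment in `changelogs/fragments` for every known change before `antsibull-changelog release` is run. As a minimal sketch (the file name, category, and wording below are illustrative only), a fragment such as `changelogs/fragments/123-droplet-example-fix.yaml` might contain:
+
+```yaml
+---
+bugfixes:
+  - digital_ocean_droplet - short description of the fix (https://github.com/ansible-collections/community.digitalocean/issues/123).
+```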
diff --git a/ansible_collections/community/digitalocean/changelogs/.gitignore b/ansible_collections/community/digitalocean/changelogs/.gitignore new file mode 100644 index 00000000..6be6b533 --- /dev/null +++ b/ansible_collections/community/digitalocean/changelogs/.gitignore @@ -0,0 +1 @@ +/.plugin-cache.yaml diff --git a/ansible_collections/community/digitalocean/changelogs/changelog.yaml b/ansible_collections/community/digitalocean/changelogs/changelog.yaml new file mode 100644 index 00000000..8eed4ea0 --- /dev/null +++ b/ansible_collections/community/digitalocean/changelogs/changelog.yaml @@ -0,0 +1,516 @@ +ancestor: null +releases: + 0.1.0: + changes: + release_summary: Initial release of the collection after extracing the modules + from `community.general <https://github.com/ansible-collections/community.general/>`_. + fragments: + - 0.1.0.yml + release_date: '2020-07-07' + 1.0.0: + changes: + bugfixes: + - Sanity test documentation fixes (https://github.com/ansible-collections/community.digitalocean/pull/3). + - Update docs examples to use FQCN (https://github.com/ansible-collections/community.digitalocean/issues/14). + fragments: + - 14-docs-fqcn.yaml + - 3-sanity-docs-fixes.yaml + release_date: '2020-08-17' + 1.1.0: + changes: + bugfixes: + - digital_ocean_certificate_info - fix retrieving certificate by ID (https://github.com/ansible-collections/community.digitalocean/issues/35). + - digital_ocean_domain - module is now idempotent when called without IP (https://github.com/ansible-collections/community.digitalocean/issues/21). + - digital_ocean_load_balancer_info - fix retrieving load balancer by ID (https://github.com/ansible-collections/community.digitalocean/issues/35). + minor_changes: + - digital_ocean_block_storage - included ability to resize Block Storage Volumes + (https://github.com/ansible-collections/community.digitalocean/issues/38). + fragments: + - 22-digital_ocean_domain-idempotent.yml + - 38-resize-volumes.yml + - 49-fix-lb-and-cert-info.yaml + modules: + - description: Manage DigitalOcean domain records + name: digital_ocean_domain_record + namespace: '' + - description: Manage cloud firewalls within DigitalOcean + name: digital_ocean_firewall + namespace: '' + plugins: + inventory: + - description: DigitalOcean Inventory Plugin + name: digitalocean + namespace: null + release_date: '2021-04-01' + 1.1.1: + changes: + bugfixes: + - digitalocean - Drop collection version from README.md (https://github.com/ansible-collections/community.digitalocean/issues/63). + fragments: + - 63-readme-version.yml + release_date: '2021-04-18' + 1.10.0: + changes: + bugfixes: + - digital_ocean_block_storage - fix block volumes detach idempotency (https://github.com/ansible-collections/community.digitalocean/issues/149). + - digital_ocean_droplet - ensure "active" state before issuing "power on" action + (https://github.com/ansible-collections/community.digitalocean/issues/150) + - digital_ocean_droplet - power on should poll/wait, resize should support "active" + state (https://github.com/ansible-collections/community.digitalocean/pull/143). + - digital_ocean_load_balancer - C(droplet_ids) are not required when C(state=absent) + is chosen (https://github.com/ansible-collections/community.digitalocean/pull/147). + - digital_ocean_load_balancer - when C(state=absent) is chosen the API returns + an empty response (https://github.com/ansible-collections/community.digitalocean/pull/147). 
+ minor_changes: + - digital_ocean_kubernetes - adding the C(taints), C(auto_scale), C(min_nodes) + and C(max_nodes) parameters to the C(node_pools) definition (https://github.com/ansible-collections/community.digitalocean/issues/157). + fragments: + - 143-droplet-resize-wait-active.yaml + - 147-load-balancer-fixes.yaml + - 149-block-detach-not-idempotent.yaml + - 150-droplet-active-power-on.yaml + - 157-doks-auto-scale.yaml + modules: + - description: Create and delete DigitalOcean CDN Endpoints + name: digital_ocean_cdn_endpoints + namespace: '' + - description: Gather information about DigitalOcean CDN Endpoints + name: digital_ocean_cdn_endpoints_info + namespace: '' + - description: Manage DigitalOcean Load Balancers + name: digital_ocean_load_balancer + namespace: '' + - description: Create and delete DigitalOcean Monitoring alerts + name: digital_ocean_monitoring_alerts + namespace: '' + - description: Gather information about DigitalOcean Monitoring alerts + name: digital_ocean_monitoring_alerts_info + namespace: '' + release_date: '2021-09-11' + 1.11.0: + changes: + bugfixes: + - Adding missing status badges for black and unit tests (https://github.com/ansible-collections/community.digitalocean/pull/164). + - Documentation URLs are fixed for the C(digital_ocean_domain_record) and C(digital_ocean_droplet_info) + modules (https://github.com/ansible-collections/community.digitalocean/pull/163). + - Serializing the cloud integration tests (https://github.com/ansible-collections/community.digitalocean/pull/165). + - digital_ocean_floating_ip - make floating ip return data idempotent (https://github.com/ansible-collections/community.digitalocean/pull/162). + - digitalocean inventory - enforce the C(timeout) parameter (https://github.com/ansible-collections/community.digitalocean/issues/168). + minor_changes: + - digitalocean inventory script - add support for Droplet tag filtering (https://github.com/ansible-collections/community.digitalocean/issues/7). + fragments: + - 162-floating-ip-data-idempotency.yaml + - 163-documentation-broken-links.yaml + - 164-add-badges.yaml + - 165-serialize-integration-tests.yml + - 168-inventory-timeout.yaml + - 7-inventory-script-droplet-tags.yaml + release_date: '2021-10-23' + 1.12.0: + changes: + bugfixes: + - Update the tests so that they only run once (https://github.com/ansible-collections/community.digitalocean/issues/186). + - 'digital_ocean_droplet - fix resizing with C(state: active) does not actually + turn Droplet on (https://github.com/ansible-collections/community.digitalocean/issues/140).' + - digital_ocean_kubernetes - fix return value consistency (https://github.com/ansible-collections/community.digitalocean/issues/174). + minor_changes: + - digital_ocean_block_storage - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digital_ocean_database - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digital_ocean_domain - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digital_ocean_droplet - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digital_ocean_droplet - adding ability to apply and remove firewall by using + droplet module (https://github.com/ansible-collections/community.digitalocean/issues/159). + - digital_ocean_droplet - require unique_name for state=absent to avoid unintentional + droplet deletions. 
+ - digital_ocean_firewall - inbound_rules and outbound_rules are no longer required + for firewall removal (https://github.com/ansible-collections/community.digitalocean/issues/181). + - digital_ocean_floating_ip - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digital_ocean_floating_ip - adding attach and detach states to floating ip + module (https://github.com/ansible-collections/community.digitalocean/issues/170). + - digital_ocean_load_balancer - adding Project support (https://github.com/ansible-collections/community.digitalocean/issues/171). + - digitalocean integration tests - adding integration tests for CDN Endpoints + (https://github.com/ansible-collections/community.digitalocean/issues/179). + fragments: + - 140-resize-state-active.yaml + - 159-add-droplet-firewall.yaml + - 170-add-flip-states.yaml + - 171-add-project-support.yaml + - 174-return-value-fix.yml + - 179-cdn-integration-tests.yaml + - 181-firewall-destroy-fix.yaml + - 186-tests-running-twice.yaml + release_date: '2021-11-06' + 1.13.0: + changes: + minor_changes: + - Set Python 3.9 as the C(python-version) and C(target-python-version) in the + integration, sanity, and unit tests for Ansible > 2.9 (3.8 otherwise). + - digital_ocean_droplet - allow the user to override the Droplet action and + status polling interval (https://github.com/ansible-collections/community.digitalocean/issues/194). + - digital_ocean_kubernetes - adding support for HA control plane (https://github.com/ansible-collections/community.digitalocean/issues/190). + fragments: + - 190-kubernetes-ha.yaml + - 194-droplet-sleep-variable.yaml + release_date: '2021-12-10' + 1.14.0: + changes: + bugfixes: + - Update README.md with updated Droplet examples (https://github.com/ansible-collections/community.digitalocean/issues/199). + - digital_ocean_cdn_endpoints - defaulting optional string parameters as strings + (https://github.com/ansible-collections/community.digitalocean/issues/205). + - digital_ocean_cdn_endpoints - updating Spaces endpoint for the integration + test (https://github.com/ansible-collections/community.digitalocean/issues/205). + - digital_ocean_droplet - ensure that Droplet creation is successful (https://github.com/ansible-collections/community.digitalocean/issues/197). + - digital_ocean_droplet - fixing project assignment for the C(unique_name=False) + case (https://github.com/ansible-collections/community.digitalocean/issues/201). + - digital_ocean_droplet - update Droplet examples (https://github.com/ansible-collections/community.digitalocean/issues/199). + minor_changes: + - digital_ocean_kubernetes_info - switching C(changed=True) to C(changed=False) + since getting information is read-only in nature (https://github.com/ansible-collections/community.digitalocean/issues/204). + fragments: + - 199-update-droplet-docs-examples.yaml + - 201-droplet-unique-name.yaml + - 204-kubernetes-tags-and-info.yaml + - 205-cdn-endpoints.yaml + release_date: '2021-12-20' + 1.15.0: + changes: + bugfixes: + - digital_ocean_droplet - move Droplet data under "droplet" key in returned + payload (https://github.com/ansible-collections/community.digitalocean/issues/211). + fragments: + - 211-droplet-data-key.yaml + modules: + - description: Create and remove DigitalOcean Spaces. + name: digital_ocean_spaces + namespace: '' + - description: List DigitalOcean Spaces. 
+ name: digital_ocean_spaces_info + namespace: '' + release_date: '2022-01-19' + 1.15.1: + changes: + bugfixes: + - digital_ocean_droplet - fix reporting of changed state when ``firewall`` argument + is present (https://github.com/ansible-collections/community.digitalocean/pull/219). + minor_changes: + - Updates DigitalOcean API documentation links to current domain with working + URL anchors (https://github.com/ansible-collections/community.digitalocean/issues/223). + fragments: + - 219-droplet-firewall-changed-reporting.yaml + - 223-fix-digitalocean-api-documentation-links.yaml + release_date: '2022-02-17' + 1.16.0: + changes: + bugfixes: + - digital_ocean_kubernetes - add missing elements type to C(node_pools.tags) + and C(node_pools.taints) options (https://github.com/ansible-collections/community.digitalocean/issues/232). + minor_changes: + - black test - added a 15 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). + - digital_ocean_domain - add support for IPv6 apex domain records (https://github.com/ansible-collections/community.digitalocean/issues/226). + - integration tests - added a 120 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). + - sanity and unit tests - added a 30 minute timeout (https://github.com/ansible-collections/community.digitalocean/issues/228). + fragments: + - 226-ipv6-apex-domain-records.yml + - 228-integration-test-timeouts.yml + - 232-kubernetes-documentation-elements.yml + modules: + - description: Gather information about DigitalOcean domain records + name: digital_ocean_domain_record_info + namespace: '' + release_date: '2022-03-19' + 1.17.0: + changes: + minor_changes: + - digital_ocean - parameterize the DigitalOcean API base url (https://github.com/ansible-collections/community.digitalocean/issues/237). + fragments: + - 237-parameterize-do-api-baseurl.yaml + release_date: '2022-04-28' + 1.18.0: + changes: + minor_changes: + - ci - adding stable-2.13 to sanity and unit testing (https://github.com/ansible-collections/community.digitalocean/issues/239). + - digital_ocean_spaces - set C(no_log=True) for C(aws_access_key_id) parameter + (https://github.com/ansible-collections/community.digitalocean/issues/243). + - digital_ocean_spaces_info - set C(no_log=True) for C(aws_access_key_id) parameter + (https://github.com/ansible-collections/community.digitalocean/issues/243). + fragments: + - 239-ci-stable-2.13.yaml + - 243-no-log-spaces-access-key-id.yaml + release_date: '2022-05-03' + 1.19.0: + changes: + bugfixes: + - digital_ocean_cdn_endpoints - remove non-API parameters before posting to + the API (https://github.com/ansible-collections/community.digitalocean/issues/252). + - digital_ocean_cdn_endpoints - use the correct module name in the C(EXAMPLES) + (https://github.com/ansible-collections/community.digitalocean/issues/251). + minor_changes: + - digital_ocean - reference C(DO_API_TOKEN) consistently in module documentation + and examples (https://github.com/ansible-collections/community.digitalocean/issues/248). + fragments: + - 248-oauth-token-consistency.yaml + - 251-cdn-endpoints-examples-wrong-module.yaml + - 252-cdn-endpoints-http-500.yaml + release_date: '2022-05-11' + 1.2.0: + changes: + bugfixes: + - digital_ocean inventory script - fail cleaner on invalid ``HOST`` argument + to ``--host`` option (https://github.com/ansible-collections/community.digitalocean/pull/44). 
+ - digital_ocean inventory script - implement unimplemented ``use_private_network`` + option and register missing ``do_ip_address``, ``do_private_ip_address`` host + vars (https://github.com/ansible-collections/community.digitalocean/pull/45/files). + - digital_ocean inventory script - return JSON consistent with specification + with ``--host`` (https://github.com/ansible-collections/community.digitalocean/pull/44). + - digital_ocean_domain - return zone records when creating a new zone (https://github.com/ansible-collections/community.digitalocean/issues/46). + - digital_ocean_droplet - add missing ``required=True`` on ``do_oauth_token`` + in ``argument_spec`` (https://github.com/ansible-collections/community.digitalocean/issues/13). + - digital_ocean_floating_ip - fixes idempotence (https://github.com/ansible-collections/community.digitalocean/issues/5). + minor_changes: + - digital_ocean - ``ssh_key_ids`` list entries are now validated to be strings + (https://github.com/ansible-collections/community.digitalocean/issues/13). + - digital_ocean_droplet - ``ssh_keys``, ``tags``, and ``volumes`` list entries + are now validated to be strings (https://github.com/ansible-collections/community.digitalocean/issues/13). + - digital_ocean_droplet - adding ``active`` and ``inactive`` states (https://github.com/ansible-collections/community.digitalocean/issues/23). + - digital_ocean_droplet - adds Droplet resize functionality (https://github.com/ansible-collections/community.digitalocean/issues/4). + fragments: + - 13-fix-sanity-tests.yaml + - 23-add-active-inactive-droplet.yaml + - 4-droplet-resize.yaml + - 44-fixes-inv-script-host-option.yaml + - 45-fix-use_private_network.yaml + - 46-fix-domain-create-return-records.yaml + - 5-fix-floating-ip-idempotence.yaml + modules: + - description: Display DigitalOcean customer balance + name: digital_ocean_balance_info + namespace: '' + release_date: '2021-05-02' + 1.20.0: + changes: + bugfixes: + - digital_ocean_droplet - fix regression in droplet deletion where ``name`` + and ``unique_name`` (set to true) are required and ``id`` alone is insufficient + (though ``id`` is sufficient to uniquely identify a droplet for deletion). + (https://github.com/ansible-collections/community.digitalocean/issues/260) + - digital_ocean_droplet - fix regression where droplet info (for example networking) + doesn't update when waiting during creation unless ``unique_name`` is set + to true (https://github.com/ansible-collections/community.digitalocean/issues/220). + minor_changes: + - digital_ocean_cdn_endpoints - update Spaces endpoint and add a few delays + to the integration test (https://github.com/ansible-collections/community.digitalocean/issues/267). + - digital_ocean_load_balancer - Allow creating a load balancer and associating + droplets by tag as an alternative to ``droplet_ids``. + fragments: + - 258-load-balancer-with-tag.yml + - 261-fix_deleting_by_id_only.yaml + - 265-get_updated_droplet_info_on_create_wait_by_id.yaml + - 267-update-cdn-endpoints.yaml + release_date: '2022-06-10' + 1.21.0: + changes: + bugfixes: + - digital_ocean_droplet - if the JSON response lacks a key and the associated + variable is set to ``None``, then don't treat that variable like a ``dict`` + and call ``get()`` on it without first testing it (https://github.com/ansible-collections/community.digitalocean/issues/272). + minor_changes: + - digital_ocean - add sanity test ignores for Ansible 2.12 and 2.13 (https://github.com/ansible-collections/community.digitalocean/issues/247). 
+ fragments: + - 247-sanity-checks-2.12-13.yaml + - 273-Dont_call_get_on_None.yaml + release_date: '2022-06-29' + 1.22.0: + changes: + minor_changes: + - collection - added an action group C(community.digitalocean.all) for use with + module defaults (https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html) + (https://github.com/ansible-collections/community.digitalocean/issues/281). + - digital_ocean_vpc - add C(vpc) key to returned VPC data on create (https://github.com/ansible-collections/community.digitalocean/issues/276). + - integration tests - perform integration testing on all modules for changes + in C(plugins/module_utils) or by changed module in C(plugins/modules) (https://github.com/ansible-collections/community.digitalocean/issues/286). + - integration tests - split the integration tests by module and run them serially + (https://github.com/ansible-collections/community.digitalocean/issues/280). + fragments: + - 276-vpc-inconsistent-data-return.yml + - 281-default-all-action-group.yml + - 286-refactor-pr-integration-testing.yaml + release_date: '2022-10-03' + 1.23.0: + changes: + bugfixes: + - inventory plugin - bugfix for baseurl parameter (https://github.com/ansible-collections/community.digitalocean/pull/297). + - integration tests - add missing `environment` directive on pull request integration + testing (https://github.com/ansible-collections/community.digitalocean/issues/293). + minor_changes: + - digital_ocean_load_balancer - add support for C(size_unit) over deprecated + C(size); deprecate C(algorithm) completely (https://github.com/ansible-collections/community.digitalocean/issues/270). + - documentation - refresh the "Testing and Development" section of the C(README.md) + (https://github.com/ansible-collections/community.digitalocean/issues/268). + - integration tests - add a dedicated integration test for C(digital_ocean_database_info) + (https://github.com/ansible-collections/community.digitalocean/issues/289). + - integration tests - set pull request integration tests to run against branch + instead of last commit (https://github.com/ansible-collections/community.digitalocean/issues/291). + fragments: + - 268-update-dev-test-setup.yaml + - 270-load-balancer-size-unit.yaml + - 289-database-info-integration-test.yml + - 291-pr-integration-tests-branch.yaml + - 293-integration-test-pr-environment.yaml + release_date: '2022-12-29' + 1.3.0: + modules: + - description: Create and delete a DigitalOcean database + name: digital_ocean_database + namespace: '' + - description: Gather information about DigitalOcean databases + name: digital_ocean_database_info + namespace: '' + - description: Create and delete a DigitalOcean Kubernetes cluster + name: digital_ocean_kubernetes + namespace: '' + - description: Returns information about an existing DigitalOcean Kubernetes cluster + name: digital_ocean_kubernetes_info + namespace: '' + release_date: '2021-05-07' + 1.4.0: + changes: + bugfixes: + - digital_ocean_droplet_info - Fix documentation link for `digital_ocean_droplet_info` + (https://github.com/ansible-collections/community.digitalocean/pull/81). + - digitalocean - Fix return docs for digital_ocean_sshkey_info (https://github.com/ansible-collections/community.digitalocean/issues/56). + - digitalocean - Update README.md for K8s and databases (https://github.com/ansible-collections/community.digitalocean/pull/80). 
+ fragments: + - 80-update-readme-k8s-dbs.yaml + - 81-fix-document-link-digital-ocean-droplet-info.yml + - 82-fix-sshkey-info.yaml + modules: + - description: Gather information about DigitalOcean Droplets + name: digital_ocean_droplet_info + namespace: '' + release_date: '2021-05-14' + 1.4.1: + changes: + bugfixes: + - digital_ocean_droplet - Add integration tests for Droplet active and inactive + states (https://github.com/ansible-collections/community.digitalocean/issues/66). + - digital_ocean_droplet - Fix Droplet inactive state (https://github.com/ansible-collections/community.digitalocean/issues/83). + fragments: + - 83-droplet-inactive.yaml + release_date: '2021-05-15' + 1.4.2: + changes: + bugfixes: + - digital_ocean_droplet - Fixed Droplet inactive state (https://github.com/ansible-collections/community.digitalocean/pull/88). + - digital_ocean_sshkey - Fixed SSH Key Traceback Issue (https://github.com/ansible-collections/community.digitalocean/issues/68). + fragments: + - 68-fix-sshkey-traceback.yaml + - 88-droplet-integration-tests.yaml + release_date: '2021-05-21' + 1.5.0: + changes: + bugfixes: + - digital_ocean_database - Fixed DB attribute settings (https://github.com/ansible-collections/community.digitalocean/issues/94). + - digital_ocean_database_info - Cleanup unused attribs (https://github.com/ansible-collections/community.digitalocean/pulls/100). + - digital_ocean_snapshot_info - Fix lookup of snapshot_info by_id (https://github.com/ansible-collections/community.digitalocean/issues/92). + - digital_ocean_tag - Fix tag idempotency (https://github.com/ansible-collections/community.digitalocean/issues/61). + minor_changes: + - digitalocean - Filter droplets in dynamic inventory plugin using arbitrary. + jinja2 expressions (https://github.com/ansible-collections/community.digitalocean/pull/96). + - digitalocean - Support templates in API tokens when using the dynamic inventory + plugin (https://github.com/ansible-collections/community.digitalocean/pull/98). + fragments: + - 100-fix-database-info.yaml + - 61-fix-tag-idempotency.yaml + - 92-snapshot-info-fix-get-by-id.yaml + - 94-fix-db-attribs.yaml + - 96-filter-droplets-through-jinja.yml + - 98-api-token-command.yml + release_date: '2021-05-26' + 1.5.1: + changes: + bugfixes: + - digitalocean inventory plugin - Wire up advertised caching functionality (https://github.com/ansible-collections/community.digitalocean/pull/97). + fragments: + - 97-wire-up-inventory-cache.yml + release_date: '2021-06-04' + 1.6.0: + changes: + bugfixes: + - digital_ocean_certificate_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). + - digital_ocean_domain_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). + - digital_ocean_firewall_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). + - digital_ocean_load_balancer_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). + - digital_ocean_tag_info - ensure return type is a list (https://github.com/ansible-collections/community.digitalocean/issues/55). + - digitalocean inventory plugin - attributes available to filters are limited + to explicitly required attributes and are prefixed with ``var_prefix`` (https://github.com/ansible-collections/community.digitalocean/pull/102). 
+ fragments: + - 102-filters-prefixed-vars.yml + - 55-fix-info-module-return-type.yaml + modules: + - description: Manage a DigitalOcean project + name: digital_ocean_project + namespace: '' + - description: Gather information about DigitalOcean Projects + name: digital_ocean_project_info + namespace: '' + release_date: '2021-06-10' + 1.7.0: + changes: + bugfixes: + - digital_ocean_certificate - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). + - digital_ocean_droplet - state `present` with `wait` was not waiting (https://github.com/ansible-collections/community.digitalocean/issues/116). + - digital_ocean_firewall - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). + - digital_ocean_tag - fixing integration test (https://github.com/ansible-collections/community.digitalocean/issues/114). + - digitalocean - update README.md with project_info and project module (https://github.com/ansible-collections/community.digitalocean/pull/112). + minor_changes: + - digital_ocean_kubernetes - set "latest" as the default version for new clusters + (https://github.com/ansible-collections/community.digitalocean/issues/114). + fragments: + - 112-update-readme.yaml + - 114-fix-integration-tests.yaml + - 116-droplet-present-wait.yaml + modules: + - description: Create and delete DigitalOcean snapshots + name: digital_ocean_snapshot + namespace: '' + - description: Create and delete DigitalOcean VPCs + name: digital_ocean_vpc + namespace: '' + - description: Gather information about DigitalOcean VPCs + name: digital_ocean_vpc_info + namespace: '' + release_date: '2021-06-21' + 1.8.0: + changes: + bugfixes: + - digital_ocean - integration tests need community.general and jmespath (https://github.com/ansible-collections/community.digitalocean/issues/121). + - digital_ocean_firewall - fixed idempotence (https://github.com/ansible-collections/community.digitalocean/issues/122). + minor_changes: + - digital_ocean_database - add support for MongoDB (https://github.com/ansible-collections/community.digitalocean/issues/124). + fragments: + - 121-integration-tests.yaml + - 122-firewall-idempotence.yaml + - 124-add-mongodb.yaml + release_date: '2021-07-05' + 1.9.0: + changes: + bugfixes: + - digital_ocean_database - increase the database creation integration test timeout + (https://github.com/ansible-collections/community.digitalocean). + - digital_ocean_floating_ip - delete all Floating IPs initially during the integration + test run (https://github.com/ansible-collections/community.digitalocean/issues/129). + - digitalocean inventory - respect the TRANSFORM_INVALID_GROUP_CHARS configuration + setting (https://github.com/ansible-collections/community.digitalocean/pull/138). + - info modules - adding missing check mode support (https://github.com/ansible-collections/community.digitalocean/issues/139). + minor_changes: + - digital_ocean - running and enforcing psf/black in the codebase (https://github.com/ansible-collections/community.digitalocean/issues/136). + - digital_ocean_floating_ip_info - new integration test for the `digital_ocean_floating_ip_info` + module (https://github.com/ansible-collections/community.digitalocean/issues/130). 
+ fragments: + - 131-floating-ip-tests.yaml + - 134-database-tests.yaml + - 134-psf-black.yaml + - 138-group-name-transformations.yaml + - 139-info-modules-check-mode.yaml + release_date: '2021-08-17' diff --git a/ansible_collections/community/digitalocean/changelogs/config.yaml b/ansible_collections/community/digitalocean/changelogs/config.yaml new file mode 100644 index 00000000..3ca6bc55 --- /dev/null +++ b/ansible_collections/community/digitalocean/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Community DigitalOcean +trivial_section_name: trivial diff --git a/ansible_collections/community/digitalocean/changelogs/fragments/.keep b/ansible_collections/community/digitalocean/changelogs/fragments/.keep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/changelogs/fragments/.keep diff --git a/ansible_collections/community/digitalocean/codecov.yml b/ansible_collections/community/digitalocean/codecov.yml new file mode 100644 index 00000000..bf61e9eb --- /dev/null +++ b/ansible_collections/community/digitalocean/codecov.yml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + informational: true # Don't fail CI +# Don't count lines of code in these directories +ignore: + - tests +fixes: + - "ansible_collections/community/digitalocean/::" diff --git a/ansible_collections/community/digitalocean/meta/runtime.yml b/ansible_collections/community/digitalocean/meta/runtime.yml new file mode 100644 index 00000000..8ae239cd --- /dev/null +++ b/ansible_collections/community/digitalocean/meta/runtime.yml @@ -0,0 +1,120 @@ +requires_ansible: '>=2.9.10' +action_groups: + all: + - digital_ocean + - digital_ocean_account_facts + - digital_ocean_account_info + - digital_ocean_balance_info + - digital_ocean_block_storage + - digital_ocean_cdn_endpoints_info + - digital_ocean_cdn_endpoints + - digital_ocean_certificate + - digital_ocean_certificate_facts + - digital_ocean_certificate_info + - digital_ocean_database_info + - digital_ocean_database + - digital_ocean_domain + - digital_ocean_domain_facts + - digital_ocean_domain_info + - digital_ocean_domain_record_info + - digital_ocean_domain_record + - digital_ocean_droplet + - digital_ocean_droplet_info + - digital_ocean_firewall + - digital_ocean_firewall_facts + - digital_ocean_firewall_info + - digital_ocean_floating_ip + - digital_ocean_floating_ip_facts + - digital_ocean_floating_ip_info + - digital_ocean_image_facts + - digital_ocean_image_info + - digital_ocean_kubernetes_info + - digital_ocean_kubernetes + - digital_ocean_load_balancer + - digital_ocean_load_balancer_facts + - digital_ocean_load_balancer_info + - digital_ocean_monitoring_alerts + - digital_ocean_monitoring_alerts_info + - digital_ocean_project_info + - digital_ocean_project + - digital_ocean_region_facts + - digital_ocean_region_info + - 
digital_ocean_size_facts + - digital_ocean_size_info + - digital_ocean_snapshot_facts + - digital_ocean_snapshot_info + - digital_ocean_snapshot + - digital_ocean_spaces_info + - digital_ocean_spaces + - digital_ocean_sshkey + - digital_ocean_sshkey_facts + - digital_ocean_sshkey_info + - digital_ocean_tag + - digital_ocean_tag_facts + - digital_ocean_tag_info + - digital_ocean_volume_facts + - digital_ocean_volume_info + - digital_ocean_vpc + - digital_ocean_vpc_info +plugin_routing: + modules: + digital_ocean: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_account_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_certificate_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_domain_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_firewall_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_floating_ip_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_image_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_load_balancer_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_region_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_size_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_snapshot_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_sshkey_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_tag_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + digital_ocean_volume_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details + docker_image_facts: + deprecation: + removal_version: 2.0.0 + warning_text: see plugin documentation for details diff --git a/ansible_collections/community/digitalocean/plugins/doc_fragments/digital_ocean.py b/ansible_collections/community/digitalocean/plugins/doc_fragments/digital_ocean.py new file mode 100644 index 00000000..bc65ad38 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/doc_fragments/digital_ocean.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class ModuleDocFragment(object): + # Parameters for DigitalOcean modules + DOCUMENTATION = r""" +options: + baseurl: + description: + - DigitalOcean API base url. + type: str + default: https://api.digitalocean.com/v2 + oauth_token: + description: + - DigitalOcean OAuth token. + - "There are several other environment variables which can be used to provide this value." 
+ - "i.e., - 'DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN' and 'OAUTH_TOKEN'" + type: str + aliases: [ api_token ] + timeout: + description: + - The timeout in seconds used for polling DigitalOcean's API. + type: int + default: 30 + validate_certs: + description: + - If set to C(no), the SSL certificates will not be validated. + - This should only be set to C(no) when used on personally controlled sites using self-signed certificates. + type: bool + default: yes +""" diff --git a/ansible_collections/community/digitalocean/plugins/inventory/digitalocean.py b/ansible_collections/community/digitalocean/plugins/inventory/digitalocean.py new file mode 100644 index 00000000..6aafb5f4 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/inventory/digitalocean.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c), Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +name: digitalocean +author: + - Janos Gerzson (@grzs) + - Tadej Borovšak (@tadeboro) + - Max Truxa (@maxtruxa) +short_description: DigitalOcean Inventory Plugin +version_added: "1.1.0" +description: + - DigitalOcean (DO) inventory plugin. + - Acquires droplet list from DO API. + - Uses configuration file that ends with '(do_hosts|digitalocean|digital_ocean).(yaml|yml)'. +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation + - constructed + - inventory_cache +options: + plugin: + description: + - The name of the DigitalOcean Inventory Plugin, + this should always be C(community.digitalocean.digitalocean). + required: true + choices: ['community.digitalocean.digitalocean'] + api_token: + description: + - DigitalOcean OAuth token. + - Template expressions can be used in this field. + required: true + type: str + aliases: [ oauth_token ] + env: + - name: DO_API_TOKEN + attributes: + description: >- + Droplet attributes to add as host vars to each inventory host. + Check out the DO API docs for full list of attributes at + U(https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_droplets). + type: list + elements: str + default: + - id + - name + - networks + - region + - size_slug + var_prefix: + description: + - Prefix of generated variable names (e.g. C(tags) -> C(do_tags)) + type: str + default: 'do_' + pagination: + description: + - Maximum droplet objects per response page. + - If the number of droplets related to the account exceeds this value, + the query will be broken to multiple requests (pages). + - DigitalOcean currently allows a maximum of 200. + type: int + default: 200 + filters: + description: + - Filter hosts with Jinja templates. + - If no filters are specified, all hosts are added to the inventory.
+ type: list + elements: str + default: [] + version_added: '1.5.0' +""" + +EXAMPLES = r""" +# Using keyed groups and compose for hostvars +plugin: community.digitalocean.digitalocean +api_token: '{{ lookup("pipe", "./get-do-token.sh") }}' +attributes: + - id + - name + - memory + - vcpus + - disk + - size + - image + - networks + - volume_ids + - tags + - region +keyed_groups: + - key: do_region.slug + prefix: 'region' + separator: '_' + - key: do_tags | lower + prefix: '' + separator: '' +compose: + ansible_host: do_networks.v4 | selectattr('type','eq','public') + | map(attribute='ip_address') | first + class: do_size.description | lower + distro: do_image.distribution | lower +filters: + - '"kubernetes" in do_tags' + - 'do_region.slug == "fra1"' +""" + +import re +import json +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.inventory.group import to_safe_group_name +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import Request +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = "community.digitalocean.digitalocean" + + # Constructable methods use the following function to construct group names. By + # default, characters that are not valid in python variables are always replaced by + # underscores. We are overriding this with a function that respects the + # TRANSFORM_INVALID_GROUP_CHARS configuration option and allows users to control the + # behavior. + _sanitize_group_name = staticmethod(to_safe_group_name) + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith( + ( + "do_hosts.yaml", + "do_hosts.yml", + "digitalocean.yaml", + "digitalocean.yml", + "digital_ocean.yaml", + "digital_ocean.yml", + ) + ): + valid = True + else: + self.display.vvv( + "Skipping due to inventory source file name mismatch. " + "The file name has to end with one of the following: " + "do_hosts.yaml, do_hosts.yml, " + "digitalocean.yaml, digitalocean.yml, " + "digital_ocean.yaml, digital_ocean.yml."
+ ) + return valid + + def _template_option(self, option): + value = self.get_option(option) + self.templar.available_variables = {} + return self.templar.template(value) + + def _get_payload(self): + # request parameters + api_token = self._template_option("api_token") + headers = { + "Content-Type": "application/json", + "Authorization": "Bearer {0}".format(api_token), + } + + # build url + pagination = self.get_option("pagination") + url = "https://api.digitalocean.com/v2" + if self.get_option("baseurl"): + url = self.get_option("baseurl") + url += "/droplets?per_page=" + str(pagination) + + # send request(s) + self.req = Request(headers=headers, timeout=self.get_option("timeout")) + payload = [] + try: + while url: + self.display.vvv("Sending request to {0}".format(url)) + response = json.load(self.req.get(url)) + payload.extend(response["droplets"]) + url = response.get("links", {}).get("pages", {}).get("next") + except ValueError: + raise AnsibleParserError("something went wrong with JSON loading") + except (URLError, HTTPError) as error: + raise AnsibleParserError(error) + + return payload + + def _populate(self, records): + attributes = self.get_option("attributes") + var_prefix = self.get_option("var_prefix") + strict = self.get_option("strict") + host_filters = self.get_option("filters") + for record in records: + + host_name = record.get("name") + if not host_name: + continue + + host_vars = {} + for k, v in record.items(): + if k in attributes: + host_vars[var_prefix + k] = v + + if not self._passes_filters(host_filters, host_vars, host_name, strict): + self.display.vvv("Host {0} did not pass all filters".format(host_name)) + continue + + # add host to inventory + self.inventory.add_host(host_name) + + # set variables for host + for k, v in host_vars.items(): + self.inventory.set_variable(host_name, k, v) + + self._set_composite_vars( + self.get_option("compose"), + self.inventory.get_host(host_name).get_vars(), + host_name, + strict, + ) + + # set composed and keyed groups + self._add_host_to_composed_groups( + self.get_option("groups"), dict(), host_name, strict + ) + self._add_host_to_keyed_groups( + self.get_option("keyed_groups"), dict(), host_name, strict + ) + + def _passes_filters(self, filters, variables, host, strict=False): + if filters and isinstance(filters, list): + for template in filters: + try: + if not self._compose(template, variables): + return False + except Exception as e: + if strict: + raise AnsibleError( + "Could not evaluate host filter {0} for host {1}: {2}".format( + template, host, to_native(e) + ) + ) + # Better be safe and not include any hosts by accident. 
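+ # When strict is false, a filter that raises an error is treated as not matching, so the host is skipped rather than failing the whole inventory run.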
+ return False + return True + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + + # cache settings + cache_key = self.get_cache_key(path) + use_cache = self.get_option("cache") and cache + update_cache = self.get_option("cache") and not cache + + records = None + if use_cache: + try: + records = self._cache[cache_key] + except KeyError: + update_cache = True + + if records is None: + records = self._get_payload() + + if update_cache: + self._cache[cache_key] = records + + self._populate(records) diff --git a/ansible_collections/community/digitalocean/plugins/module_utils/digital_ocean.py b/ansible_collections/community/digitalocean/plugins/module_utils/digital_ocean.py new file mode 100644 index 00000000..44ca3ccd --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/module_utils/digital_ocean.py @@ -0,0 +1,305 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Ansible Project 2017 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +import os +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback + + +class Response(object): + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(to_text(self.info["body"])) + return None + try: + return json.loads(to_text(self.body)) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + +class DigitalOceanHelper: + baseurl = "https://api.digitalocean.com/v2" + + def __init__(self, module): + self.module = module + self.baseurl = module.params.get("baseurl", DigitalOceanHelper.baseurl) + self.timeout = module.params.get("timeout", 30) + self.oauth_token = module.params.get("oauth_token") + self.headers = { + "Authorization": "Bearer {0}".format(self.oauth_token), + "Content-type": "application/json", + } + + # Check if api_token is valid or not + response = self.get("account") + if response.status_code == 401: + self.module.fail_json( + msg="Failed to login using API token, please verify validity of API token." 
+ ) + + def _url_builder(self, path): + if path[0] == "/": + path = path[1:] + return "%s/%s" % (self.baseurl, path) + + def send(self, method, path, data=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + if method == "DELETE": + if data == "null": + data = None + + resp, info = fetch_url( + self.module, + url, + data=data, + headers=self.headers, + method=method, + timeout=self.timeout, + ) + + return Response(resp, info) + + def get(self, path, data=None): + return self.send("GET", path, data) + + def put(self, path, data=None): + return self.send("PUT", path, data) + + def post(self, path, data=None): + return self.send("POST", path, data) + + def delete(self, path, data=None): + return self.send("DELETE", path, data) + + @staticmethod + def digital_ocean_argument_spec(): + return dict( + baseurl=dict( + type="str", required=False, default="https://api.digitalocean.com/v2" + ), + validate_certs=dict(type="bool", required=False, default=True), + oauth_token=dict( + no_log=True, + # Support environment variable for DigitalOcean OAuth Token + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN", "OAUTH_TOKEN"], + ), + required=False, + aliases=["api_token"], + ), + timeout=dict(type="int", default=30), + ) + + def get_paginated_data( + self, + base_url=None, + data_key_name=None, + data_per_page=40, + expected_status_code=200, + ): + """ + Function to get all paginated data from given URL + Args: + base_url: Base URL to get data from + data_key_name: Name of data key value + data_per_page: Number results per page (Default: 40) + expected_status_code: Expected returned code from DigitalOcean (Default: 200) + Returns: List of data + + """ + page = 1 + has_next = True + ret_data = [] + status_code = None + response = None + while has_next or status_code != expected_status_code: + required_url = "{0}page={1}&per_page={2}".format( + base_url, page, data_per_page + ) + response = self.get(required_url) + status_code = response.status_code + # stop if any error during pagination + if status_code != expected_status_code: + break + page += 1 + ret_data.extend(response.json[data_key_name]) + try: + has_next = ( + "pages" in response.json["links"] + and "next" in response.json["links"]["pages"] + ) + except KeyError: + # There's a bug in the API docs: GET v2/cdn/endpoints doesn't return a "links" key + has_next = False + + if status_code != expected_status_code: + msg = "Failed to fetch %s from %s" % (data_key_name, base_url) + if response: + msg += " due to error : %s" % response.json["message"] + self.module.fail_json(msg=msg) + + return ret_data + + +class DigitalOceanProjects: + def __init__(self, module, rest): + self.module = module + self.rest = rest + self.get_all_projects() + + def get_all_projects(self): + """Fetches all projects.""" + self.projects = self.rest.get_paginated_data( + base_url="projects?", data_key_name="projects" + ) + + def get_default(self): + """Fetches the default project. + + Returns: + error_message -- project fetch error message (or "" if no error) + project -- project dictionary representation (or {} if error) + """ + project = [ + project for project in self.projects if project.get("is_default", False) + ] + if len(project) == 0: + return "Unexpected error; no default project found", {} + if len(project) > 1: + return "Unexpected error; more than one default project", {} + return "", project[0] + + def get_by_id(self, id): + """Fetches the project with the given id. 
+ + Returns: + error_message -- project fetch error message (or "" if no error) + project -- project dictionary representation (or {} if error) + """ + project = [project for project in self.projects if project.get("id") == id] + if len(project) == 0: + return "No project with id {0} found".format(id), {} + elif len(project) > 1: + return "Unexpected error; more than one project with the same id", {} + return "", project[0] + + def get_by_name(self, name): + """Fetches the project with the given name. + + Returns: + error_message -- project fetch error message (or "" if no error) + project -- project dictionary representation (or {} if error) + """ + project = [project for project in self.projects if project.get("name") == name] + if len(project) == 0: + return "No project with name {0} found".format(name), {} + elif len(project) > 1: + return "Unexpected error; more than one project with the same name", {} + return "", project[0] + + def assign_to_project(self, project_name, urn): + """Assign resource (urn) to project (name). + + Keyword arguments: + project_name -- project name to associate the resource with + urn -- resource URN (has the form do:resource_type:resource_id) + + Returns: + assign_status -- ok, not_found, assigned, already_assigned, service_down + error_message -- assignment error message (empty on success) + resources -- resources assigned (or {} if error) + + Notes: + For URN examples, see https://docs.digitalocean.com/reference/api/api-reference/#tag/Project-Resources + + Projects resources are identified by uniform resource names or URNs. + A valid URN has the following format: do:resource_type:resource_id. + + The following resource types are supported: + Resource Type | Example URN + Database | do:dbaas:83c7a55f-0d84-4760-9245-aba076ec2fb2 + Domain | do:domain:example.com + Droplet | do:droplet:4126873 + Floating IP | do:floatingip:192.168.99.100 + Load Balancer | do:loadbalancer:39052d89-8dd4-4d49-8d5a-3c3b6b365b5b + Space | do:space:my-website-assets + Volume | do:volume:6fc4c277-ea5c-448a-93cd-dd496cfef71f + """ + error_message, project = self.get_by_name(project_name) + if not project: + return "", error_message, {} + + project_id = project.get("id", None) + if not project_id: + return ( + "", + "Unexpected error; cannot find project id for {0}".format(project_name), + {}, + ) + + data = {"resources": [urn]} + response = self.rest.post( + "projects/{0}/resources".format(project_id), data=data + ) + status_code = response.status_code + json = response.json + if status_code != 200: + message = json.get("message", "No error message returned") + return ( + "", + "Unable to assign resource {0} to project {1} [HTTP {2}: {3}]".format( + urn, project_name, status_code, message + ), + {}, + ) + + resources = json.get("resources", []) + if len(resources) == 0: + return ( + "", + "Unexpected error; no resources returned (but assignment was successful)", + {}, + ) + if len(resources) > 1: + return ( + "", + "Unexpected error; more than one resource returned (but assignment was successful)", + {}, + ) + + status = resources[0].get( + "status", + "Unexpected error; no status returned (but assignment was successful)", + ) + return ( + status, + "Assigned {0} to project {1}".format(urn, project_name), + resources[0], + ) diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean.py new file mode 100644 index 00000000..1c33f2c7 --- /dev/null +++ 
b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean.py @@ -0,0 +1,525 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean +short_description: Create/delete a droplet/SSH_key in DigitalOcean +deprecated: + removed_in: 2.0.0 # was Ansible 2.12 + why: Updated module to remove external dependency with increased functionality. + alternative: Use M(community.digitalocean.digital_ocean_droplet) instead. +description: + - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. +author: "Vincent Viallet (@zbal)" +options: + command: + description: + - Which target you want to operate on. + default: droplet + choices: ['droplet', 'ssh'] + type: str + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'active', 'absent', 'deleted'] + type: str + api_token: + description: + - DigitalOcean api token. + type: str + aliases: + - API_TOKEN + id: + description: + - Numeric, the droplet id you want to operate on. + aliases: ['droplet_id'] + type: int + name: + description: + - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. + type: str + unique_name: + description: + - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host + per name. Useful for idempotence. + type: bool + default: 'no' + size_id: + description: + - This is the slug of the size you would like the droplet created with. + type: str + image_id: + description: + - This is the slug of the image you would like the droplet created with. + type: str + region_id: + description: + - This is the slug of the region you would like your server to be created in. + type: str + ssh_key_ids: + description: + - Optional, array of SSH key (numeric) ID that you would like to be added to the server. + type: list + elements: str + virtio: + description: + - "Bool, turn on virtio driver in droplet for improved network and storage I/O." + type: bool + default: 'yes' + private_networking: + description: + - "Bool, add an additional, private network interface to droplet for inter-droplet communication." + type: bool + default: 'no' + backups_enabled: + description: + - Optional, Boolean, enables backups for your droplet. + type: bool + default: 'no' + user_data: + description: + - opaque blob of data which is made available to the droplet + type: str + ipv6: + description: + - Optional, Boolean, enable IPv6 for your droplet. + type: bool + default: 'no' + wait: + description: + - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. + type: bool + default: 'yes' + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + ssh_pub_key: + description: + - The public SSH key you want to add to your account. + type: str + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). 
+ - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory. +requirements: + - "python >= 2.6" + - dopy +""" + + +EXAMPLES = r""" +# Ensure an SSH key is present +# If a key matches this name, will return the ssh key id and changed = False +# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True + +- name: Ensure an SSH key is present + community.digitalocean.digital_ocean: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: ssh + name: my_ssh_key + ssh_pub_key: 'ssh-rsa AAAA...' + +# Will return the droplet details including the droplet id (used for idempotence) +- name: Create a new Droplet + community.digitalocean.digital_ocean: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: droplet + name: mydroplet + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 + wait_timeout: 500 + register: my_droplet + +- debug: + msg: "ID is {{ my_droplet.droplet.id }}" + +- debug: + msg: "IP is {{ my_droplet.droplet.ip_address }}" + +# Ensure a droplet is present +# If the droplet id already exists, will return the droplet details and changed = False +# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. + +- name: Ensure a droplet is present + community.digitalocean.digital_ocean: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: droplet + id: 123 + name: mydroplet + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 + wait_timeout: 500 + +# Create a droplet with ssh key +# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). +# Several keys can be added to ssh_key_ids as id1,id2,id3 +# The keys are used to connect as root to the droplet.
+ +- name: Create a droplet with ssh key + community.digitalocean.digital_ocean: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + ssh_key_ids: 123,456 + name: mydroplet + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 +""" + +import os +import time +import traceback + +try: + from packaging.version import Version + + HAS_PACKAGING = True +except ImportError: + HAS_PACKAGING = False + +try: + # Imported as a dependency for dopy + import ansible.module_utils.six + + HAS_SIX = True +except ImportError: + HAS_SIX = False + +HAS_DOPY = False +try: + import dopy + from dopy.manager import DoError, DoManager + + # NOTE: Expressing Python dependencies isn't really possible: + # https://github.com/ansible/ansible/issues/62733#issuecomment-537098744 + if HAS_PACKAGING: + if Version(dopy.__version__) >= Version("0.3.2"): + HAS_DOPY = True + else: + if dopy.__version__ >= "0.3.2": # Naive lexographical check + HAS_DOPY = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +class TimeoutError(Exception): + def __init__(self, msg, id_): + super(TimeoutError, self).__init__(msg) + self.id = id_ + + +class JsonfyMixIn(object): + def to_json(self): + return self.__dict__ + + +class Droplet(JsonfyMixIn): + manager = None + + def __init__(self, droplet_json): + self.status = "new" + self.__dict__.update(droplet_json) + + def is_powered_on(self): + return self.status == "active" + + def update_attr(self, attrs=None): + if attrs: + for k, v in attrs.items(): + setattr(self, k, v) + networks = attrs.get("networks", {}) + for network in networks.get("v6", []): + if network["type"] == "public": + setattr(self, "public_ipv6_address", network["ip_address"]) + else: + setattr(self, "private_ipv6_address", network["ip_address"]) + else: + json = self.manager.show_droplet(self.id) + if json["ip_address"]: + self.update_attr(json) + + def power_on(self): + if self.status != "off": + raise AssertionError("Can only power on a closed one.") + json = self.manager.power_on_droplet(self.id) + self.update_attr(json) + + def ensure_powered_on(self, wait=True, wait_timeout=300): + if self.is_powered_on(): + return + if self.status == "off": # powered off + self.power_on() + + if wait: + end_time = time.monotonic() + wait_timeout + while time.monotonic() < end_time: + time.sleep(10) + self.update_attr() + if self.is_powered_on(): + if not self.ip_address: + raise TimeoutError("No ip is found.", self.id) + return + raise TimeoutError("Wait for droplet running timeout", self.id) + + def destroy(self): + return self.manager.destroy_droplet(self.id, scrub_data=True) + + @classmethod + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) + + @classmethod + def add( + cls, + name, + size_id, + image_id, + region_id, + ssh_key_ids=None, + virtio=True, + private_networking=False, + backups_enabled=False, + user_data=None, + ipv6=False, + ): + private_networking_lower = str(private_networking).lower() + backups_enabled_lower = str(backups_enabled).lower() + ipv6_lower = str(ipv6).lower() + json = cls.manager.new_droplet( + name, + size_id, + image_id, + region_id, + ssh_key_ids=ssh_key_ids, + virtio=virtio, + private_networking=private_networking_lower, + backups_enabled=backups_enabled_lower, + user_data=user_data, + ipv6=ipv6_lower, + ) + droplet = cls(json) + return droplet + + @classmethod + def find(cls, id=None, name=None): + if not id and not name: + return False + + droplets = cls.list_all() + + 
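+ # NOTE: find() returns False (not None) when no droplet matches; core() relies on that falsy value to decide whether a new droplet must be created.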
# Check first by id. digital ocean requires that it be unique + for droplet in droplets: + if droplet.id == id: + return droplet + + # Failing that, check by hostname. + for droplet in droplets: + if droplet.name == name: + return droplet + + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_active_droplets() + return list(map(cls, json)) + + +class SSH(JsonfyMixIn): + manager = None + + def __init__(self, ssh_key_json): + self.__dict__.update(ssh_key_json) + + update_attr = __init__ + + def destroy(self): + self.manager.destroy_ssh_key(self.id) + return True + + @classmethod + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) + + @classmethod + def find(cls, name): + if not name: + return False + keys = cls.list_all() + for key in keys: + if key.name == name: + return key + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_ssh_keys() + return list(map(cls, json)) + + @classmethod + def add(cls, name, key_pub): + json = cls.manager.new_ssh_key(name, key_pub) + return cls(json) + + +def core(module): + def getkeyordie(k): + v = module.params[k] + if v is None: + module.fail_json(msg="Unable to load %s" % k) + return v + + api_token = module.params["api_token"] + changed = True + command = module.params["command"] + state = module.params["state"] + + if command == "droplet": + Droplet.setup(api_token) + if state in ("active", "present"): + + # First, try to find a droplet by id. + droplet = Droplet.find(id=module.params["id"]) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. + if not droplet and module.params["unique_name"]: + droplet = Droplet.find(name=getkeyordie("name")) + + # If both of those attempts failed, then create a new droplet. + if not droplet: + droplet = Droplet.add( + name=getkeyordie("name"), + size_id=getkeyordie("size_id"), + image_id=getkeyordie("image_id"), + region_id=getkeyordie("region_id"), + ssh_key_ids=module.params["ssh_key_ids"], + virtio=module.params["virtio"], + private_networking=module.params["private_networking"], + backups_enabled=module.params["backups_enabled"], + user_data=module.params.get("user_data"), + ipv6=module.params["ipv6"], + ) + + if droplet.is_powered_on(): + changed = False + + droplet.ensure_powered_on( + wait=getkeyordie("wait"), wait_timeout=getkeyordie("wait_timeout") + ) + + module.exit_json(changed=changed, droplet=droplet.to_json()) + + elif state in ("absent", "deleted"): + # First, try to find a droplet by id. + droplet = Droplet.find(module.params["id"]) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. + if not droplet and module.params["unique_name"]: + droplet = Droplet.find(name=getkeyordie("name")) + + if not droplet: + module.exit_json(changed=False, msg="The droplet is not found.") + + droplet.destroy() + module.exit_json(changed=True) + + elif command == "ssh": + SSH.setup(api_token) + name = getkeyordie("name") + if state in ("active", "present"): + key = SSH.find(name) + if key: + module.exit_json(changed=False, ssh_key=key.to_json()) + key = SSH.add(name, getkeyordie("ssh_pub_key")) + module.exit_json(changed=True, ssh_key=key.to_json()) + + elif state in ("absent", "deleted"): + key = SSH.find(name) + if not key: + module.exit_json( + changed=False, + msg="SSH key with the name of %s is not found." 
% name, + ) + key.destroy() + module.exit_json(changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(choices=["droplet", "ssh"], default="droplet"), + state=dict( + choices=["active", "present", "absent", "deleted"], default="present" + ), + api_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=(env_fallback, ["DO_API_TOKEN", "DO_API_KEY"]), + ), + name=dict(type="str"), + size_id=dict(), + image_id=dict(), + region_id=dict(), + ssh_key_ids=dict(type="list", elements="str", no_log=False), + virtio=dict(type="bool", default=True), + private_networking=dict(type="bool", default=False), + backups_enabled=dict(type="bool", default=False), + id=dict(aliases=["droplet_id"], type="int"), + unique_name=dict(type="bool", default=False), + user_data=dict(default=None), + ipv6=dict(type="bool", default=False), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + ssh_pub_key=dict(type="str"), + ), + required_together=(["size_id", "image_id", "region_id"],), + mutually_exclusive=( + ["size_id", "ssh_pub_key"], + ["image_id", "ssh_pub_key"], + ["region_id", "ssh_pub_key"], + ), + required_one_of=(["id", "name"],), + ) + if not HAS_DOPY and not HAS_SIX: + module.fail_json( + msg="dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. " + "Make sure both dopy and six are installed." + ) + if not HAS_DOPY: + module.fail_json(msg="dopy >= 0.3.2 required for this module") + + try: + core(module) + except TimeoutError as e: + module.fail_json(msg=str(e), id=e.id) + except (DoError, Exception) as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_facts.py new file mode 100644 index 00000000..46ccd3e5 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_facts.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_account_info +short_description: Gather information about DigitalOcean User account +description: + - This module can be used to gather information about User account. + - This module was called C(digital_ocean_account_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" + +requirements: + - "python >= 2.6" + +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about user account + community.digitalocean.digital_ocean_account_info: + oauth_token: "{{ oauth_token }}" +""" + + +RETURN = r""" +data: + description: DigitalOcean account information + returned: success + type: dict + sample: { + "droplet_limit": 10, + "email": "testuser1@gmail.com", + "email_verified": true, + "floating_ip_limit": 3, + "status": "active", + "status_message": "", + "uuid": "aaaaaaaaaaaaaa" + } +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account") + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'account' information due to error : %s" + % response.json["message"] + ) + + module.exit_json(changed=False, data=response.json["account"]) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_account_facts", + "community.digitalocean.digital_ocean_account_facts", + ): + module.deprecate( + "The 'digital_ocean_account_facts' module has been renamed to 'digital_ocean_account_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_info.py new file mode 100644 index 00000000..46ccd3e5 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_account_info.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_account_info +short_description: Gather information about DigitalOcean User account +description: + - This module can be used to gather information about User account. + - This module was called C(digital_ocean_account_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" + +requirements: + - "python >= 2.6" + +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about user account + community.digitalocean.digital_ocean_account_info: + oauth_token: "{{ oauth_token }}" +""" + + +RETURN = r""" +data: + description: DigitalOcean account information + returned: success + type: dict + sample: { + "droplet_limit": 10, + "email": "testuser1@gmail.com", + "email_verified": true, + "floating_ip_limit": 3, + "status": "active", + "status_message": "", + "uuid": "aaaaaaaaaaaaaa" + } +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account") + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'account' information due to error : %s" + % response.json["message"] + ) + + module.exit_json(changed=False, data=response.json["account"]) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_account_facts", + "community.digitalocean.digital_ocean_account_facts", + ): + module.deprecate( + "The 'digital_ocean_account_facts' module has been renamed to 'digital_ocean_account_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_balance_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_balance_info.py new file mode 100644 index 00000000..3aea5353 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_balance_info.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_balance_info +short_description: Display DigitalOcean customer balance +description: + - This module can be used to display the DigitalOcean customer balance. 
+author: "Mark Mercado (@mamercad)" +version_added: 1.2.0 +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Display DigitalOcean customer balance + community.digitalocean.digital_ocean_balance_info: + oauth_token: "{{ oauth_token }}" +""" + + +RETURN = r""" +# DigitalOcean API info https://docs.digitalocean.com/reference/api/api-reference/#operation/get_customer_balance +data: + description: DigitalOcean customer balance + returned: success + type: dict + sample: { + "account_balance": "-27.52", + "generated_at": "2021-04-11T05:08:24Z", + "month_to_date_balance": "-27.40", + "month_to_date_usage": "0.00" + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +def run(module): + rest = DigitalOceanHelper(module) + + response = rest.get("customers/my/balance") + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'customers/my/balance' information due to error : %s" + % response.json["message"] + ) + + module.exit_json(changed=False, data=response.json) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_block_storage.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_block_storage.py new file mode 100644 index 00000000..8597eb1e --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_block_storage.py @@ -0,0 +1,411 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_block_storage +short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean +description: + - Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet. +options: + command: + description: + - Which operation do you want to perform. + choices: ['create', 'attach'] + required: true + type: str + state: + description: + - Indicate desired state of the target. + choices: ['present', 'absent'] + required: true + type: str + block_size: + description: + - The size of the Block Storage volume in gigabytes. + - Required when I(command=create) and I(state=present). + - If snapshot_id is included, this will be ignored. + - If block_size > current size of the volume, the volume is resized. + type: int + volume_name: + description: + - The name of the Block Storage volume. + type: str + required: true + description: + description: + - Description of the Block Storage volume. + type: str + region: + description: + - The slug of the region where your Block Storage volume should be located in. + - If I(snapshot_id) is included, this will be ignored. + type: str + snapshot_id: + description: + - The snapshot id you would like the Block Storage volume created with. + - If included, I(region) and I(block_size) will be ignored and changed to C(null). + type: str + droplet_id: + description: + - The droplet id you want to operate on. + - Required when I(command=attach). 
+ type: int + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when C(command=create). + type: str + required: false + default: "" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. + They both refer to the v2 token. + - If snapshot_id is used, region and block_size will be ignored and changed to null. + +author: + - "Harnek Sidhu (@harneksidhu)" +""" + +EXAMPLES = r""" +- name: Create new Block Storage + community.digitalocean.digital_ocean_block_storage: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: create + region: nyc1 + block_size: 10 + volume_name: nyc1-block-storage + +- name: Create new Block Storage (and assign to Project "test") + community.digitalocean.digital_ocean_block_storage: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: create + region: nyc1 + block_size: 10 + volume_name: nyc1-block-storage + project_name: test + +- name: Resize an existing Block Storage + community.digitalocean.digital_ocean_block_storage: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: create + region: nyc1 + block_size: 20 + volume_name: nyc1-block-storage + +- name: Delete Block Storage + community.digitalocean.digital_ocean_block_storage: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: create + region: nyc1 + volume_name: nyc1-block-storage + +- name: Attach Block Storage to a Droplet + community.digitalocean.digital_ocean_block_storage: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: attach + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: <ID> + +- name: Detach Block Storage from a Droplet + community.digitalocean.digital_ocean_block_storage: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + command: attach + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: <ID> +""" + +RETURN = r""" +id: + description: Unique identifier of a Block Storage volume returned during creation. 
+ returned: changed
+ type: str
+ sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
+msg:
+ description: Informational or error message encountered during execution
+ returned: changed
+ type: str
+ sample: No project named test2 found
+assign_status:
+ description: Assignment status (ok, not_found, assigned, already_assigned, service_down)
+ returned: changed
+ type: str
+ sample: assigned
+resources:
+ description: Resource assignment involved in project assignment
+ returned: changed
+ type: dict
+ sample:
+ assigned_at: '2021-10-25T17:39:38Z'
+ links:
+ self: https://api.digitalocean.com/v2/volumes/8691c49e-35ba-11ec-9406-0a58ac1472b9
+ status: assigned
+ urn: do:volume:8691c49e-35ba-11ec-9406-0a58ac1472b9
+"""
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import (
+ DigitalOceanHelper,
+ DigitalOceanProjects,
+)
+
+
+class DOBlockStorageException(Exception):
+ pass
+
+
+class DOBlockStorage(object):
+ def __init__(self, module):
+ self.module = module
+ self.rest = DigitalOceanHelper(module)
+ if self.module.params.get("project"):
+ # only load for non-default project assignments
+ self.projects = DigitalOceanProjects(module, self.rest)
+
+ def get_key_or_fail(self, k):
+ v = self.module.params[k]
+ if v is None:
+ self.module.fail_json(msg="Unable to load %s" % k)
+ return v
+
+ def poll_action_for_complete_status(self, action_id):
+ url = "actions/{0}".format(action_id)
+ end_time = time.monotonic() + self.module.params["timeout"]
+ while time.monotonic() < end_time:
+ time.sleep(10)
+ response = self.rest.get(url)
+ status = response.status_code
+ json = response.json
+ if status == 200:
+ if json["action"]["status"] == "completed":
+ return True
+ elif json["action"]["status"] == "errored":
+ raise DOBlockStorageException(json["message"])
+ raise DOBlockStorageException(
+ "Unable to reach the DigitalOcean API at %s"
+ % self.module.params.get("baseurl")
+ )
+
+ def get_block_storage_by_name(self, volume_name, region):
+ url = "volumes?name={0}&region={1}".format(volume_name, region)
+ resp = self.rest.get(url)
+ if resp.status_code != 200:
+ raise DOBlockStorageException(resp.json["message"])
+
+ volumes = resp.json["volumes"]
+ if not volumes:
+ return None
+
+ return volumes[0]
+
+ def get_attached_droplet_ID(self, volume_name, region):
+ volume = self.get_block_storage_by_name(volume_name, region)
+ if not volume or not volume["droplet_ids"]:
+ return None
+
+ return volume["droplet_ids"][0]
+
+ def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
+ data = {
+ "type": method,
+ "volume_name": volume_name,
+ "region": region,
+ "droplet_id": droplet_id,
+ }
+ response = self.rest.post("volumes/actions", data=data)
+ status = response.status_code
+ json = response.json
+ if status == 202:
+ return self.poll_action_for_complete_status(json["action"]["id"])
+ elif status == 200:
+ return True
+ elif status == 404 and method == "detach":
+ return False # Already detached
+ elif status == 422:
+ return False
+ else:
+ raise DOBlockStorageException(json["message"])
+
+ def resize_block_storage(self, volume_name, region, desired_size):
+ if not desired_size:
+ return False
+
+ volume = self.get_block_storage_by_name(volume_name, region)
+ if volume["size_gigabytes"] == desired_size:
+ return False
+
+ data = {
+ "type": "resize",
+ "size_gigabytes": desired_size,
+ }
+ resp = self.rest.post(
+ "volumes/{0}/actions".format(volume["id"]),
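+ # send the resize action built above ("type": "resize", "size_gigabytes": ...) to the volume's actions endpoint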
+ data=data,
+ )
+ if resp.status_code == 202:
+ return self.poll_action_for_complete_status(resp.json["action"]["id"])
+ else:
+ # we'd get status 422 if desired_size <= current volume size
+ raise DOBlockStorageException(resp.json["message"])
+
+ def create_block_storage(self):
+ volume_name = self.get_key_or_fail("volume_name")
+ snapshot_id = self.module.params["snapshot_id"]
+ if snapshot_id:
+ self.module.params["block_size"] = None
+ self.module.params["region"] = None
+ block_size = None
+ region = None
+ else:
+ block_size = self.get_key_or_fail("block_size")
+ region = self.get_key_or_fail("region")
+ description = self.module.params["description"]
+ data = {
+ "size_gigabytes": block_size,
+ "name": volume_name,
+ "description": description,
+ "region": region,
+ "snapshot_id": snapshot_id,
+ }
+ response = self.rest.post("volumes", data=data)
+ status = response.status_code
+ json = response.json
+ if status == 201:
+ project_name = self.module.params.get("project")
+ if (
+ project_name
+ ): # empty string is the default project, skip project assignment
+ urn = "do:volume:{0}".format(json["volume"]["id"])
+ (
+ assign_status,
+ error_message,
+ resources,
+ ) = self.projects.assign_to_project(project_name, urn)
+ self.module.exit_json(
+ changed=True,
+ id=json["volume"]["id"],
+ msg=error_message,
+ assign_status=assign_status,
+ resources=resources,
+ )
+ else:
+ self.module.exit_json(changed=True, id=json["volume"]["id"])
+ elif status == 409 and json["id"] == "conflict":
+ # The volume exists already, but it might not have the desired size
+ resized = self.resize_block_storage(volume_name, region, block_size)
+ self.module.exit_json(changed=resized)
+ else:
+ raise DOBlockStorageException(json["message"])
+
+ def delete_block_storage(self):
+ volume_name = self.get_key_or_fail("volume_name")
+ region = self.get_key_or_fail("region")
+ url = "volumes?name={0}&region={1}".format(volume_name, region)
+ attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
+ if attached_droplet_id is not None:
+ self.attach_detach_block_storage(
+ "detach", volume_name, region, attached_droplet_id
+ )
+ response = self.rest.delete(url)
+ status = response.status_code
+ json = response.json
+ if status == 204:
+ self.module.exit_json(changed=True)
+ elif status == 404:
+ self.module.exit_json(changed=False)
+ else:
+ raise DOBlockStorageException(json["message"])
+
+ def attach_block_storage(self):
+ volume_name = self.get_key_or_fail("volume_name")
+ region = self.get_key_or_fail("region")
+ droplet_id = self.get_key_or_fail("droplet_id")
+ attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
+ if attached_droplet_id is not None:
+ if attached_droplet_id == droplet_id:
+ self.module.exit_json(changed=False)
+ else:
+ self.attach_detach_block_storage(
+ "detach", volume_name, region, attached_droplet_id
+ )
+ changed_status = self.attach_detach_block_storage(
+ "attach", volume_name, region, droplet_id
+ )
+ self.module.exit_json(changed=changed_status)
+
+ def detach_block_storage(self):
+ volume_name = self.get_key_or_fail("volume_name")
+ region = self.get_key_or_fail("region")
+ droplet_id = self.get_key_or_fail("droplet_id")
+ changed_status = self.attach_detach_block_storage(
+ "detach", volume_name, region, droplet_id
+ )
+ self.module.exit_json(changed=changed_status)
+
+
+def handle_request(module):
+ block_storage = DOBlockStorage(module)
+ command = module.params["command"]
+ state = module.params["state"]
+ if command == "create":
+ if state ==
"present": + block_storage.create_block_storage() + elif state == "absent": + block_storage.delete_block_storage() + elif command == "attach": + if state == "present": + block_storage.attach_block_storage() + elif state == "absent": + block_storage.detach_block_storage() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], required=True), + command=dict(choices=["create", "attach"], required=True), + block_size=dict(type="int", required=False), + volume_name=dict(type="str", required=True), + description=dict(type="str"), + region=dict(type="str", required=False), + snapshot_id=dict(type="str", required=False), + droplet_id=dict(type="int"), + project_name=dict(type="str", aliases=["project"], required=False, default=""), + ) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + handle_request(module) + except DOBlockStorageException as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + except KeyError as e: + module.fail_json(msg="Unable to load %s" % e, exception=traceback.format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints.py new file mode 100644 index 00000000..d3617758 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_cdn_endpoints +short_description: Create, update, and delete DigitalOcean CDN Endpoints +description: + - Create, update, and delete DigitalOcean CDN Endpoints +author: "Mark Mercado (@mamercad)" +version_added: 1.10.0 +options: + state: + description: + - The usual, C(present) to create, C(absent) to destroy + type: str + choices: ["present", "absent"] + default: present + origin: + description: + - The fully qualified domain name (FQDN) for the origin server which provides the content for the CDN. + - This is currently restricted to a Space. + type: str + required: true + ttl: + description: + - The amount of time the content is cached by the CDN's edge servers in seconds. + - TTL must be one of 60, 600, 3600, 86400, or 604800. + - Defaults to 3600 (one hour) when excluded. + type: int + choices: [60, 600, 3600, 86400, 604800] + default: 3600 + required: false + certificate_id: + description: + - The ID of a DigitalOcean managed TLS certificate used for SSL when a custom subdomain is provided. + type: str + required: false + custom_domain: + description: + - The fully qualified domain name (FQDN) of the custom subdomain used with the CDN endpoint. 
+ type: str + required: false +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Create DigitalOcean CDN Endpoint + community.digitalocean.digital_ocean_cdn_endpoints: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + origin: mamercad.nyc3.digitaloceanspaces.com + +- name: Update DigitalOcean CDN Endpoint (change ttl to 600, default is 3600) + community.digitalocean.digital_ocean_cdn_endpoints: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + origin: mamercad.nyc3.digitaloceanspaces.com + ttl: 600 + +- name: Delete DigitalOcean CDN Endpoint + community.digitalocean.digital_ocean_cdn_endpoints: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + origin: mamercad.nyc3.digitaloceanspaces.com +""" + + +RETURN = r""" +data: + description: DigitalOcean CDN Endpoints + returned: success + type: dict + sample: + data: + endpoint: + created_at: '2021-09-05T13:47:23Z' + endpoint: mamercad.nyc3.cdn.digitaloceanspaces.com + id: 01739563-3f50-4da4-a451-27f6d59d7573 + origin: mamercad.nyc3.digitaloceanspaces.com + ttl: 3600 +""" + + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOCDNEndpoint(object): + def __init__(self, module): + self.module = module + self.rest = DigitalOceanHelper(module) + # pop the oauth token so we don't include it in the POST data + self.token = self.module.params.pop("oauth_token") + + def get_cdn_endpoints(self): + cdns = self.rest.get_paginated_data( + base_url="cdn/endpoints?", data_key_name="endpoints" + ) + return cdns + + def get_cdn_endpoint(self): + cdns = self.rest.get_paginated_data( + base_url="cdn/endpoints?", data_key_name="endpoints" + ) + found = None + for cdn in cdns: + if cdn.get("origin") == self.module.params.get("origin"): + found = cdn + for key in ["ttl", "certificate_id"]: + if self.module.params.get(key) != cdn.get(key): + return found, True + return found, False + + def create(self): + cdn, needs_update = self.get_cdn_endpoint() + + if cdn is not None: + if not needs_update: + # Have it already + self.module.exit_json(changed=False, msg=cdn) + if needs_update: + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Update it + request_params = dict(self.module.params) + + endpoint = "cdn/endpoints" + response = self.rest.put( + "{0}/{1}".format(endpoint, cdn.get("id")), data=request_params + ) + status_code = response.status_code + json_data = response.json + + # The API docs are wrong (they say 202 but return 200) + if status_code != 200: + self.module.fail_json( + changed=False, + msg="Failed to put {0} information due to error [HTTP {1}: {2}]".format( + endpoint, + status_code, + json_data.get("message", "(empty error message)"), + ), + ) + + self.module.exit_json(changed=True, data=json_data) + else: + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Create it + request_params = dict(self.module.params) + + endpoint = "cdn/endpoints" + response = self.rest.post(endpoint, data=request_params) + status_code = response.status_code + json_data = response.json + + if status_code != 201: + self.module.fail_json( + changed=False, + msg="Failed to post {0} information due to error [HTTP {1}: {2}]".format( + endpoint, + status_code, + json_data.get("message", 
"(empty error message)"), + ), + ) + + self.module.exit_json(changed=True, data=json_data) + + def delete(self): + cdn, needs_update = self.get_cdn_endpoint() + if cdn is not None: + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Delete it + endpoint = "cdn/endpoints/{0}".format(cdn.get("id")) + response = self.rest.delete(endpoint) + status_code = response.status_code + json_data = response.json + + if status_code != 204: + self.module.fail_json( + changed=False, + msg="Failed to delete {0} information due to error [HTTP {1}: {2}]".format( + endpoint, + status_code, + json_data.get("message", "(empty error message)"), + ), + ) + + self.module.exit_json( + changed=True, + msg="Deleted CDN Endpoint {0} ({1})".format( + cdn.get("origin"), cdn.get("id") + ), + ) + else: + self.module.exit_json(changed=False) + + +def run(module): + state = module.params.pop("state") + c = DOCDNEndpoint(module) + + # Pop these away (don't need them beyond DOCDNEndpoint) + module.params.pop("baseurl") + module.params.pop("validate_certs") + module.params.pop("timeout") + + if state == "present": + c.create() + elif state == "absent": + c.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + origin=dict(type="str", required=True), + ttl=dict( + type="int", + choices=[60, 600, 3600, 86400, 604800], + required=False, + default=3600, + ), + certificate_id=dict(type="str", default=""), + custom_domain=dict(type="str", default=""), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints_info.py new file mode 100644 index 00000000..7c8de494 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_cdn_endpoints_info.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_cdn_endpoints_info +short_description: Display DigitalOcean CDN Endpoints +description: + - Display DigitalOcean CDN Endpoints +author: "Mark Mercado (@mamercad)" +version_added: 1.10.0 +options: + state: + description: + - The usual, C(present) to create, C(absent) to destroy + type: str + choices: ["present", "absent"] + default: present +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Display DigitalOcean CDN Endpoints + community.digitalocean.digital_ocean_cdn_endpoints_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" +""" + + +RETURN = r""" +data: + description: DigitalOcean CDN Endpoints + returned: success + type: dict + sample: + data: + endpoints: + - created_at: '2021-09-05T13:47:23Z' + endpoint: mamercad.nyc3.cdn.digitaloceanspaces.com + id: 01739563-3f50-4da4-a451-27f6d59d7573 + origin: mamercad.nyc3.digitaloceanspaces.com + ttl: 3600 + meta: + total: 1 +""" + + +from ansible.module_utils.basic import AnsibleModule, 
env_fallback
+from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import (
+ DigitalOceanHelper,
+)
+
+
+def run(module):
+ rest = DigitalOceanHelper(module)
+
+ endpoint = "cdn/endpoints"
+ response = rest.get(endpoint)
+ json_data = response.json
+ status_code = response.status_code
+
+ if status_code != 200:
+ module.fail_json(
+ changed=False,
+ msg="Failed to get {0} information due to error [HTTP {1}: {2}]".format(
+ endpoint, status_code, json_data.get("message", "(empty error message)")
+ ),
+ )
+
+ module.exit_json(changed=False, data=json_data)
+
+
+def main():
+ argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+ argument_spec.update(state=dict(choices=["present", "absent"], default="present"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ run(module)
+
+
+if __name__ == "__main__":
+ main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate.py new file mode 100644 index 00000000..60dd0fea --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate.py @@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: digital_ocean_certificate
+short_description: Manage certificates in DigitalOcean
+description:
+ - Create, retrieve, and remove certificates in DigitalOcean.
+author: "Abhijeet Kasurde (@Akasurde)"
+options:
+ name:
+ description:
+ - The name of the certificate.
+ required: True
+ type: str
+ private_key:
+ description:
+ - A PEM-formatted private key content of SSL Certificate.
+ type: str
+ leaf_certificate:
+ description:
+ - A PEM-formatted public SSL Certificate.
+ type: str
+ certificate_chain:
+ description:
+ - The full PEM-formatted trust chain between the certificate authority's certificate and your domain's SSL certificate.
+ type: str
+ state:
+ description:
+ - Whether the certificate should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- community.digitalocean.digital_ocean.documentation
+
+notes:
+ - Three environment variables can be used: DO_API_KEY, DO_OAUTH_TOKEN, and DO_API_TOKEN.
+ They all refer to the v2 token.
+""" + + +EXAMPLES = r""" +- name: Create a certificate + community.digitalocean.digital_ocean_certificate: + name: production + state: present + private_key: "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkM8OI7pRpgyj1I\n-----END PRIVATE KEY-----" + leaf_certificate: "-----BEGIN CERTIFICATE-----\nMIIFDmg2Iaw==\n-----END CERTIFICATE-----" + oauth_token: b7d03a6947b217efb6f3ec3bd365652 + +- name: Create a certificate using file lookup plugin + community.digitalocean.digital_ocean_certificate: + name: production + state: present + private_key: "{{ lookup('file', 'test.key') }}" + leaf_certificate: "{{ lookup('file', 'test.cert') }}" + oauth_token: "{{ oauth_token }}" + +- name: Create a certificate with trust chain + community.digitalocean.digital_ocean_certificate: + name: production + state: present + private_key: "{{ lookup('file', 'test.key') }}" + leaf_certificate: "{{ lookup('file', 'test.cert') }}" + certificate_chain: "{{ lookup('file', 'chain.cert') }}" + oauth_token: "{{ oauth_token }}" + +- name: Remove a certificate + community.digitalocean.digital_ocean_certificate: + name: production + state: absent + oauth_token: "{{ oauth_token }}" + +""" + + +RETURN = r""" # """ + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + state = module.params["state"] + name = module.params["name"] + + rest = DigitalOceanHelper(module) + + results = dict(changed=False) + + response = rest.get("certificates") + status_code = response.status_code + resp_json = response.json + + if status_code != 200: + module.fail_json(msg="Failed to retrieve certificates for DigitalOcean") + + if state == "present": + for cert in resp_json["certificates"]: + if cert["name"] == name: + module.fail_json(msg="Certificate name %s already exists" % name) + + # Certificate does not exist, let us create it + cert_data = dict( + name=name, + private_key=module.params["private_key"], + leaf_certificate=module.params["leaf_certificate"], + ) + + if module.params["certificate_chain"] is not None: + cert_data.update(certificate_chain=module.params["certificate_chain"]) + + response = rest.post("certificates", data=cert_data) + status_code = response.status_code + if status_code == 500: + module.fail_json( + msg="Failed to upload certificates as the certificates are malformed." 
+ ) + + resp_json = response.json + if status_code == 201: + results.update(changed=True, response=resp_json) + elif status_code == 422: + results.update(changed=False, response=resp_json) + + elif state == "absent": + cert_id_del = None + for cert in resp_json["certificates"]: + if cert["name"] == name: + cert_id_del = cert["id"] + + if cert_id_del is not None: + url = "certificates/{0}".format(cert_id_del) + response = rest.delete(url) + if response.status_code == 204: + results.update(changed=True) + else: + results.update(changed=False) + else: + module.fail_json(msg="Failed to find certificate %s" % name) + + module.exit_json(**results) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=True), + leaf_certificate=dict(type="str"), + private_key=dict(type="str", no_log=True), + state=dict(choices=["present", "absent"], default="present"), + certificate_chain=dict(type="str"), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ("state", "present", ["leaf_certificate", "private_key"]), + ], + ) + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_facts.py new file mode 100644 index 00000000..c9125985 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_facts.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_certificate_info +short_description: Gather information about DigitalOcean certificates +description: + - This module can be used to gather information about DigitalOcean provided certificates. + - This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + certificate_id: + description: + - Certificate ID that can be used to identify and reference a certificate. 
+ required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all certificates + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about certificate with given id + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ oauth_token }}" + certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf" + +- name: Get not after information about certificate + community.digitalocean.digital_ocean_certificate_info: + register: resp_out +- set_fact: + not_after_date: "{{ item.not_after }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='web-cert-01']" +- debug: + var: not_after_date +""" + + +RETURN = r""" +data: + description: DigitalOcean certificate information + returned: success + type: list + elements: dict + sample: [ + { + "id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "name": "web-cert-01", + "not_after": "2017-02-22T00:23:00Z", + "sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7", + "created_at": "2017-02-08T16:02:37Z" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + certificate_id = module.params.get("certificate_id", None) + rest = DigitalOceanHelper(module) + + base_url = "certificates" + if certificate_id is not None: + response = rest.get("%s/%s" % (base_url, certificate_id)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve certificates for DigitalOcean") + + certificate = [response.json["certificate"]] + else: + certificate = rest.get_paginated_data( + base_url=base_url + "?", data_key_name="certificates" + ) + + module.exit_json(changed=False, data=certificate) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + certificate_id=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_certificate_facts", + "community.digitalocean.digital_ocean_certificate_facts", + ): + module.deprecate( + "The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_info.py new file mode 100644 index 00000000..c9125985 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_certificate_info.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = 
r""" +--- +module: digital_ocean_certificate_info +short_description: Gather information about DigitalOcean certificates +description: + - This module can be used to gather information about DigitalOcean provided certificates. + - This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + certificate_id: + description: + - Certificate ID that can be used to identify and reference a certificate. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all certificates + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about certificate with given id + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ oauth_token }}" + certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf" + +- name: Get not after information about certificate + community.digitalocean.digital_ocean_certificate_info: + register: resp_out +- set_fact: + not_after_date: "{{ item.not_after }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='web-cert-01']" +- debug: + var: not_after_date +""" + + +RETURN = r""" +data: + description: DigitalOcean certificate information + returned: success + type: list + elements: dict + sample: [ + { + "id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "name": "web-cert-01", + "not_after": "2017-02-22T00:23:00Z", + "sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7", + "created_at": "2017-02-08T16:02:37Z" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + certificate_id = module.params.get("certificate_id", None) + rest = DigitalOceanHelper(module) + + base_url = "certificates" + if certificate_id is not None: + response = rest.get("%s/%s" % (base_url, certificate_id)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve certificates for DigitalOcean") + + certificate = [response.json["certificate"]] + else: + certificate = rest.get_paginated_data( + base_url=base_url + "?", data_key_name="certificates" + ) + + module.exit_json(changed=False, data=certificate) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + certificate_id=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_certificate_facts", + "community.digitalocean.digital_ocean_certificate_facts", + ): + module.deprecate( + "The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database.py new file mode 100644 index 00000000..ffae82db --- 
/dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database.py @@ -0,0 +1,437 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# Copyright: (c) 2021, Mark Mercado <mmercado@digitalocean.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_database +short_description: Create and delete a DigitalOcean database +description: + - Create and delete a database in DigitalOcean and optionally wait for it to be online. + - DigitalOcean's managed database service simplifies the creation and management of highly available database clusters. + - Currently, it offers support for PostgreSQL, Redis, MySQL, and MongoDB. +version_added: 1.3.0 +author: "Mark Mercado (@mamercad)" +options: + state: + description: + - Indicates the desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + id: + description: + - A unique ID that can be used to identify and reference a database cluster. + type: int + aliases: ['database_id'] + name: + description: + - A unique, human-readable name for the database cluster. + type: str + required: true + engine: + description: + - A slug representing the database engine used for the cluster. + - The possible values are C(pg) for PostgreSQL, C(mysql) for MySQL, C(redis) for Redis, and C(mongodb) for MongoDB. + type: str + required: true + choices: ['pg', 'mysql', 'redis', 'mongodb'] + version: + description: + - A string representing the version of the database engine in use for the cluster. + - For C(pg), versions are 10, 11 and 12. + - For C(mysql), version is 8. + - For C(redis), version is 5. + - For C(mongodb), version is 4. + type: str + size: + description: + - The slug identifier representing the size of the nodes in the database cluster. + - See U(https://docs.digitalocean.com/reference/api/api-reference/#operation/create_database_cluster) for supported sizes. + type: str + required: true + aliases: ['size_id'] + region: + description: + - The slug identifier for the region where the database cluster is located. + type: str + required: true + aliases: ['region_id'] + num_nodes: + description: + - The number of nodes in the database cluster. + - Valid choices are 1, 2 or 3. + type: int + default: 1 + choices: [1, 2, 3] + tags: + description: + - An array of tags that have been applied to the database cluster. + type: list + elements: str + private_network_uuid: + description: + - A string specifying the UUID of the VPC to which the database cluster is assigned. + type: str + wait: + description: + - Wait for the database to be online before returning. + required: False + default: True + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds, when creating a database. + default: 600 + type: int + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when creating databases. 
+ type: str + required: false + default: "" +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Create a Redis database + community.digitalocean.digital_ocean_database: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_KEY') }}" + state: present + name: testdatabase1 + engine: redis + size: db-s-1vcpu-1gb + region: nyc1 + num_nodes: 1 + register: my_database + +- name: Create a Redis database (and assign to Project "test") + community.digitalocean.digital_ocean_database: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_KEY') }}" + state: present + name: testdatabase1 + engine: redis + size: db-s-1vcpu-1gb + region: nyc1 + num_nodes: 1 + project_name: test + register: my_database +""" + + +RETURN = r""" +data: + description: A DigitalOcean database + returned: success + type: dict + sample: + database: + connection: + database: "" + host: testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com + password: REDACTED + port: 25061 + protocol: rediss + ssl: true + uri: rediss://default:REDACTED@testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com:25061 + user: default + created_at: "2021-04-21T15:41:14Z" + db_names: null + engine: redis + id: 37de10e4-808b-4f4b-b25f-7b5b3fd194ac + maintenance_window: + day: monday + hour: 11:33:47 + pending: false + name: testdatabase1 + num_nodes: 1 + private_connection: + database: "" + host: private-testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com + password: REDIS + port: 25061 + protocol: rediss + ssl: true + uri: rediss://default:REDACTED@private-testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com:25061 + user: default + private_network_uuid: 0db3519b-9efc-414a-8868-8f2e6934688c, + region: nyc1 + size: db-s-1vcpu-1gb + status: online + tags: null + users: null + version: 6 +msg: + description: Informational or error message encountered during execution + returned: changed + type: str + sample: No project named test2 found +assign_status: + description: Assignment status (ok, not_found, assigned, already_assigned, service_down) + returned: changed + type: str + sample: assigned +resources: + description: Resource assignment involved in project assignment + returned: changed + type: dict + sample: + assigned_at: '2021-10-25T17:39:38Z' + links: + self: https://api.digitalocean.com/v2/databases/126355fa-b147-40a6-850a-c44f5d2ad418 + status: assigned + urn: do:dbaas:126355fa-b147-40a6-850a-c44f5d2ad418 +""" + + +import json +import time +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, + DigitalOceanProjects, +) + + +class DODatabase(object): + def __init__(self, module): + self.module = module + self.rest = DigitalOceanHelper(module) + if self.module.params.get("project"): + # only load for non-default project assignments + self.projects = DigitalOceanProjects(module, self.rest) + # pop wait and wait_timeout so we don't include it in the POST data + self.wait = self.module.params.pop("wait", True) + self.wait_timeout = self.module.params.pop("wait_timeout", 600) + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.id = None + self.name = None + self.engine = None + self.version = None + self.num_nodes = None + self.region = None + self.status = None + self.size = None + + def get_by_id(self, database_id): + if database_id is None: + return None + response = 
self.rest.get("databases/{0}".format(database_id)) + json_data = response.json + if response.status_code == 200: + database = json_data.get("database", None) + if database is not None: + self.id = database.get("id", None) + self.name = database.get("name", None) + self.engine = database.get("engine", None) + self.version = database.get("version", None) + self.num_nodes = database.get("num_nodes", None) + self.region = database.get("region", None) + self.status = database.get("status", None) + self.size = database.get("size", None) + return json_data + return None + + def get_by_name(self, database_name): + if database_name is None: + return None + page = 1 + while page is not None: + response = self.rest.get("databases?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + databases = json_data.get("databases", None) + if databases is None or not isinstance(databases, list): + return None + for database in databases: + if database.get("name", None) == database_name: + self.id = database.get("id", None) + self.name = database.get("name", None) + self.engine = database.get("engine", None) + self.version = database.get("version", None) + self.status = database.get("status", None) + self.num_nodes = database.get("num_nodes", None) + self.region = database.get("region", None) + self.size = database.get("size", None) + return {"database": database} + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def get_database(self): + json_data = self.get_by_id(self.module.params["id"]) + if not json_data: + json_data = self.get_by_name(self.module.params["name"]) + return json_data + + def ensure_online(self, database_id): + end_time = time.monotonic() + self.wait_timeout + while time.monotonic() < end_time: + response = self.rest.get("databases/{0}".format(database_id)) + json_data = response.json + database = json_data.get("database", None) + if database is not None: + status = database.get("status", None) + if status is not None: + if status == "online": + return json_data + time.sleep(10) + self.module.fail_json(msg="Waiting for database online timeout") + + def create(self): + json_data = self.get_database() + + if json_data is not None: + database = json_data.get("database", None) + if database is not None: + self.module.exit_json(changed=False, data=json_data) + else: + self.module.fail_json( + changed=False, msg="Unexpected error, please file a bug" + ) + + if self.module.check_mode: + self.module.exit_json(changed=True) + + request_params = dict(self.module.params) + del request_params["id"] + + response = self.rest.post("databases", data=request_params) + json_data = response.json + if response.status_code >= 400: + self.module.fail_json(changed=False, msg=json_data["message"]) + database = json_data.get("database", None) + if database is None: + self.module.fail_json( + changed=False, + msg="Unexpected error; please file a bug https://github.com/ansible-collections/community.digitalocean/issues", + ) + + database_id = database.get("id", None) + if database_id is None: + self.module.fail_json( + changed=False, + msg="Unexpected error; please file a bug https://github.com/ansible-collections/community.digitalocean/issues", + ) + + if self.wait: + json_data = self.ensure_online(database_id) + + project_name = self.module.params.get("project") + if project_name: # empty string is the default project, skip project assignment + urn = 
"do:dbaas:{0}".format(database_id) + assign_status, error_message, resources = self.projects.assign_to_project( + project_name, urn + ) + self.module.exit_json( + changed=True, + data=json_data, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + else: + self.module.exit_json(changed=True, data=json_data) + + def delete(self): + json_data = self.get_database() + if json_data is not None: + if self.module.check_mode: + self.module.exit_json(changed=True) + database = json_data.get("database", None) + database_id = database.get("id", None) + database_name = database.get("name", None) + database_region = database.get("region", None) + if database_id is not None: + response = self.rest.delete("databases/{0}".format(database_id)) + json_data = response.json + if response.status_code == 204: + self.module.exit_json( + changed=True, + msg="Deleted database {0} ({1}) in region {2}".format( + database_name, database_id, database_region + ), + ) + self.module.fail_json( + changed=False, + msg="Failed to delete database {0} ({1}) in region {2}: {3}".format( + database_name, + database_id, + database_region, + json_data["message"], + ), + ) + else: + self.module.fail_json( + changed=False, msg="Unexpected error, please file a bug" + ) + else: + self.module.exit_json( + changed=False, + msg="Database {0} in region {1} not found".format( + self.module.params["name"], self.module.params["region"] + ), + ) + + +def run(module): + state = module.params.pop("state") + database = DODatabase(module) + if state == "present": + database.create() + elif state == "absent": + database.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + id=dict(type="int", aliases=["database_id"]), + name=dict(type="str", required=True), + engine=dict(choices=["pg", "mysql", "redis", "mongodb"], required=True), + version=dict(type="str"), + size=dict(type="str", aliases=["size_id"], required=True), + region=dict(type="str", aliases=["region_id"], required=True), + num_nodes=dict(type="int", choices=[1, 2, 3], default=1), + tags=dict(type="list", elements="str"), + private_network_uuid=dict(type="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=600, type="int"), + project_name=dict(type="str", aliases=["project"], required=False, default=""), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(["id", "name"],), + required_if=( + [ + ("state", "present", ["name", "size", "engine", "region"]), + ("state", "absent", ["name", "size", "engine", "region"]), + ] + ), + supports_check_mode=True, + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database_info.py new file mode 100644 index 00000000..cc599661 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_database_info.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# Copyright: (c) 2021, Mark Mercado <mmercado@digitalocean.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_database_info +short_description: Gather 
information about DigitalOcean databases +description: + - Gather information about DigitalOcean databases. +version_added: 1.3.0 +author: "Mark Mercado (@mamercad)" +options: + id: + description: + - A unique ID that can be used to identify and reference a database cluster. + type: int + aliases: ['database_id'] + required: false + name: + description: + - A unique, human-readable name for the database cluster. + type: str + required: false +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Gather all DigitalOcean databases + community.digitalocean.digital_ocean_database_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_KEY') }}" + register: my_databases +""" + + +RETURN = r""" +data: + description: List of DigitalOcean databases + returned: success + type: list + sample: [ + { + "connection": { + "database": "", + "host": "testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com", + "password": "REDACTED", + "port": 25061, + "protocol":"rediss", + "ssl": true, + "uri": "rediss://default:REDACTED@testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com:25061", + "user": "default" + }, + "created_at": "2021-04-21T15:41:14Z", + "db_names": null, + "engine": "redis", + "id": "37de10e4-808b-4f4b-b25f-7b5b3fd194ac", + "maintenance_window": { + "day": "monday", + "hour": "11:33:47", + "pending": false + }, + "name": "testdatabase1", + "num_nodes": 1, + "private_connection": { + "database": "", + "host": "private-testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com", + "password": "REDACTED", + "port": 25061, + "protocol": "rediss", + "ssl": true, + "uri": "rediss://default:REDACTED@private-testdatabase1-do-user-3097135-0.b.db.ondigitalocean.com:25061", + "user": "default" + }, + "private_network_uuid": "0db3519b-9efc-414a-8868-8f2e6934688c", + "region": "nyc1", + "size": "db-s-1vcpu-1gb", + "status": "online", + "tags": null, + "users": null, + "version": "6" + }, + ... 
+ ] +""" + + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DODatabaseInfo(object): + def __init__(self, module): + self.module = module + self.rest = DigitalOceanHelper(module) + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.id = None + self.name = None + + def get_by_id(self, database_id): + if database_id is None: + return None + response = self.rest.get("databases/{0}".format(database_id)) + json_data = response.json + if response.status_code == 200: + database = json_data.get("database", None) + if database is not None: + self.id = database.get("id", None) + self.name = database.get("name", None) + return json_data + return None + + def get_by_name(self, database_name): + if database_name is None: + return None + page = 1 + while page is not None: + response = self.rest.get("databases?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + for database in json_data["databases"]: + if database.get("name", None) == database_name: + self.id = database.get("id", None) + self.name = database.get("name", None) + return {"database": database} + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def get_database(self): + json_data = self.get_by_id(self.module.params["id"]) + if not json_data: + json_data = self.get_by_name(self.module.params["name"]) + return json_data + + def get_databases(self): + all_databases = [] + page = 1 + while page is not None: + response = self.rest.get("databases?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + databases = json_data.get("databases", None) + if databases is not None and isinstance(databases, list): + all_databases.append(databases) + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return {"databases": all_databases} + + +def run(module): + id = module.params.get("id", None) + name = module.params.get("name", None) + + database = DODatabaseInfo(module) + + if id is not None or name is not None: + the_database = database.get_database() + if the_database: # Found it + module.exit_json(changed=False, data=the_database) + else: # Didn't find it + if id is not None and name is not None: + module.fail_json( + change=False, msg="Database {0} ({1}) not found".format(id, name) + ) + elif id is not None and name is None: + module.fail_json(change=False, msg="Database {0} not found".format(id)) + elif id is None and name is not None: + module.fail_json( + change=False, msg="Database {0} not found".format(name) + ) + else: + all_databases = database.get_databases() + module.exit_json(changed=False, data=all_databases) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + id=dict(type="int", aliases=["database_id"]), + name=dict(type="str"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain.py new file mode 100644 index 00000000..234c6cf2 --- 
/dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_domain +short_description: Create/delete a DNS domain in DigitalOcean +description: + - Create/delete a DNS domain in DigitalOcean. +author: "Michael Gregson (@mgregson)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + id: + description: + - The droplet id you want to operate on. + aliases: ['droplet_id'] + type: int + name: + description: + - The name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain. + type: str + ip: + description: + - An 'A' record for '@' ($ORIGIN) will be created with the value 'ip'. 'ip' is an IP version 4 address. + type: str + aliases: ['ip4', 'ipv4'] + ip6: + description: + - An 'AAAA' record for '@' ($ORIGIN) will be created with the value 'ip6'. 'ip6' is an IP version 6 address. + type: str + aliases: ['ipv6'] + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when creating domains. + type: str + required: false + default: "" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Environment variables DO_OAUTH_TOKEN can be used for the oauth_token. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(oauth_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. 
+ +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Create a domain + community.digitalocean.digital_ocean_domain: + state: present + name: my.digitalocean.domain + ip: 127.0.0.1 + +- name: Create a domain (and associate to Project "test") + community.digitalocean.digital_ocean_domain: + state: present + name: my.digitalocean.domain + ip: 127.0.0.1 + project: test + +# Create a droplet and corresponding domain +- name: Create a droplet + community.digitalocean.digital_ocean: + state: present + name: test_droplet + size_id: 1gb + region_id: sgp1 + image_id: ubuntu-14-04-x64 + register: test_droplet + +- name: Create a corresponding domain + community.digitalocean.digital_ocean_domain: + state: present + name: "{{ test_droplet.droplet.name }}.my.domain" + ip: "{{ test_droplet.droplet.ip_address }}" + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, + DigitalOceanProjects, +) +import time + +ZONE_FILE_ATTEMPTS = 5 +ZONE_FILE_SLEEP = 3 + + +class DoManager(DigitalOceanHelper, object): + def __init__(self, module): + super(DoManager, self).__init__(module) + self.domain_name = module.params.get("name", None) + self.domain_ip = module.params.get("ip", None) + self.domain_id = module.params.get("id", None) + + @staticmethod + def jsonify(response): + return response.status_code, response.json + + def all_domains(self): + resp = self.get("domains/") + return resp + + def find(self): + if self.domain_name is None and self.domain_id is None: + return None + + domains = self.all_domains() + status, json = self.jsonify(domains) + for domain in json["domains"]: + if domain["name"] == self.domain_name: + return domain + return None + + def add(self): + params = {"name": self.domain_name, "ip_address": self.domain_ip} + resp = self.post("domains/", data=params) + status = resp.status_code + json = resp.json + if status == 201: + return json["domain"] + else: + return json + + def all_domain_records(self): + resp = self.get("domains/%s/records/" % self.domain_name) + return resp.json + + def domain_record(self): + resp = self.get("domains/%s" % self.domain_name) + status, json = self.jsonify(resp) + return json + + def destroy_domain(self): + resp = self.delete("domains/%s" % self.domain_name) + status, json = self.jsonify(resp) + if status == 204: + return True + else: + return json + + def edit_domain_record(self, record): + if self.module.params.get("ip"): + params = {"name": "@", "data": self.module.params.get("ip")} + if self.module.params.get("ip6"): + params = {"name": "@", "data": self.module.params.get("ip6")} + + resp = self.put( + "domains/%s/records/%s" % (self.domain_name, record["id"]), data=params + ) + status, json = self.jsonify(resp) + + return json["domain_record"] + + def create_domain_record(self): + if self.module.params.get("ip"): + params = {"name": "@", "type": "A", "data": self.module.params.get("ip")} + if self.module.params.get("ip6"): + params = { + "name": "@", + "type": "AAAA", + "data": self.module.params.get("ip6"), + } + + resp = self.post("domains/%s/records" % (self.domain_name), data=params) + status, json = self.jsonify(resp) + + return json["domain_record"] + + +def run(module): + do_manager = DoManager(module) + state = module.params.get("state") + + if module.params.get("project"): + # only load for non-default project assignments + projects = DigitalOceanProjects(module, do_manager) + + domain = do_manager.find() + if 
state == "present": + if not domain: + domain = do_manager.add() + if "message" in domain: + module.fail_json(changed=False, msg=domain["message"]) + else: + # We're at the mercy of a backend process which we have no visibility into: + # https://docs.digitalocean.com/reference/api/api-reference/#operation/create_domain + # + # In particular: "Keep in mind that, upon creation, the zone_file field will + # have a value of null until a zone file is generated and propagated through + # an automatic process on the DigitalOcean servers." + # + # Arguably, it's nice to see the records versus null, so, we'll just try a + # few times before giving up and returning null. + + domain_name = module.params.get("name") + project_name = module.params.get("project") + urn = "do:domain:{0}".format(domain_name) + + for i in range(ZONE_FILE_ATTEMPTS): + record = do_manager.domain_record() + if record is not None and "domain" in record: + domain = record.get("domain", None) + if domain is not None and "zone_file" in domain: + if ( + project_name + ): # empty string is the default project, skip project assignment + ( + assign_status, + error_message, + resources, + ) = projects.assign_to_project(project_name, urn) + module.exit_json( + changed=True, + domain=domain, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + else: + module.exit_json(changed=True, domain=domain) + time.sleep(ZONE_FILE_SLEEP) + if ( + project_name + ): # empty string is the default project, skip project assignment + ( + assign_status, + error_message, + resources, + ) = projects.assign_to_project(project_name, urn) + module.exit_json( + changed=True, + domain=domain, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + else: + module.exit_json(changed=True, domain=domain) + else: + records = do_manager.all_domain_records() + if module.params.get("ip"): + at_record = None + for record in records["domain_records"]: + if record["name"] == "@" and record["type"] == "A": + at_record = record + + if not at_record: + do_manager.create_domain_record() + module.exit_json(changed=True, domain=do_manager.find()) + elif not at_record["data"] == module.params.get("ip"): + do_manager.edit_domain_record(at_record) + module.exit_json(changed=True, domain=do_manager.find()) + + if module.params.get("ip6"): + at_record = None + for record in records["domain_records"]: + if record["name"] == "@" and record["type"] == "AAAA": + at_record = record + + if not at_record: + do_manager.create_domain_record() + module.exit_json(changed=True, domain=do_manager.find()) + elif not at_record["data"] == module.params.get("ip6"): + do_manager.edit_domain_record(at_record) + module.exit_json(changed=True, domain=do_manager.find()) + + module.exit_json(changed=False, domain=do_manager.domain_record()) + + elif state == "absent": + if not domain: + module.exit_json(changed=False, msg="Domain not found") + else: + delete_event = do_manager.destroy_domain() + if not delete_event: + module.fail_json(changed=False, msg=delete_event["message"]) + else: + module.exit_json(changed=True, event=None) + delete_event = do_manager.destroy_domain() + module.exit_json(changed=delete_event) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + name=dict(type="str"), + id=dict(aliases=["droplet_id"], type="int"), + ip=dict(type="str", aliases=["ip4", "ipv4"]), + ip6=dict(type="str", aliases=["ipv6"]), + 
project_name=dict(type="str", aliases=["project"], required=False, default=""), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(["id", "name"],), + mutually_exclusive=[("ip", "ip6")], + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_facts.py new file mode 100644 index 00000000..32382b28 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_facts.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_domain_info +short_description: Gather information about DigitalOcean Domains +description: + - This module can be used to gather information about DigitalOcean provided Domains. + - This module was called C(digital_ocean_domain_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + domain_name: + description: + - Name of the domain to gather information for. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all domains + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about domain with given name + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + domain_name: "example.com" + +- name: Get ttl from domain + community.digitalocean.digital_ocean_domain_info: + register: resp_out +- set_fact: + domain_ttl: "{{ item.ttl }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='example.com']" +- debug: + var: domain_ttl +""" + + +RETURN = r""" +data: + description: DigitalOcean Domain information + returned: success + elements: dict + type: list + sample: [ + { + "domain_records": [ + { + "data": "ns1.digitalocean.com", + "flags": null, + "id": 37826823, + "name": "@", + "port": null, + "priority": null, + "tag": null, + "ttl": 1800, + "type": "NS", + "weight": null + }, + ], + "name": "myexample123.com", + "ttl": 1800, + "zone_file": "myexample123.com. IN SOA ns1.digitalocean.com. hostmaster.myexample123.com. 
1520702984 10800 3600 604800 1800\n", + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + domain_name = module.params.get("domain_name", None) + rest = DigitalOceanHelper(module) + domain_results = [] + + if domain_name is not None: + response = rest.get("domains/%s" % domain_name) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve domain for DigitalOcean") + + resp_json = response.json + domains = [resp_json["domain"]] + else: + domains = rest.get_paginated_data(base_url="domains?", data_key_name="domains") + + for temp_domain in domains: + temp_domain_dict = { + "name": temp_domain["name"], + "ttl": temp_domain["ttl"], + "zone_file": temp_domain["zone_file"], + "domain_records": list(), + } + + base_url = "domains/%s/records?" % temp_domain["name"] + + temp_domain_dict["domain_records"] = rest.get_paginated_data( + base_url=base_url, data_key_name="domain_records" + ) + domain_results.append(temp_domain_dict) + + module.exit_json(changed=False, data=domain_results) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + domain_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_domain_facts", + "community.digitalocean.digital_ocean_domain_facts", + ): + module.deprecate( + "The 'digital_ocean_domain_facts' module has been renamed to 'digital_ocean_domain_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_info.py new file mode 100644 index 00000000..32382b28 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_info.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_domain_info +short_description: Gather information about DigitalOcean Domains +description: + - This module can be used to gather information about DigitalOcean provided Domains. + - This module was called C(digital_ocean_domain_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + domain_name: + description: + - Name of the domain to gather information for. 
+ required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all domains + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about domain with given name + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + domain_name: "example.com" + +- name: Get ttl from domain + community.digitalocean.digital_ocean_domain_info: + register: resp_out +- set_fact: + domain_ttl: "{{ item.ttl }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='example.com']" +- debug: + var: domain_ttl +""" + + +RETURN = r""" +data: + description: DigitalOcean Domain information + returned: success + elements: dict + type: list + sample: [ + { + "domain_records": [ + { + "data": "ns1.digitalocean.com", + "flags": null, + "id": 37826823, + "name": "@", + "port": null, + "priority": null, + "tag": null, + "ttl": 1800, + "type": "NS", + "weight": null + }, + ], + "name": "myexample123.com", + "ttl": 1800, + "zone_file": "myexample123.com. IN SOA ns1.digitalocean.com. hostmaster.myexample123.com. 1520702984 10800 3600 604800 1800\n", + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + domain_name = module.params.get("domain_name", None) + rest = DigitalOceanHelper(module) + domain_results = [] + + if domain_name is not None: + response = rest.get("domains/%s" % domain_name) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve domain for DigitalOcean") + + resp_json = response.json + domains = [resp_json["domain"]] + else: + domains = rest.get_paginated_data(base_url="domains?", data_key_name="domains") + + for temp_domain in domains: + temp_domain_dict = { + "name": temp_domain["name"], + "ttl": temp_domain["ttl"], + "zone_file": temp_domain["zone_file"], + "domain_records": list(), + } + + base_url = "domains/%s/records?" 
% temp_domain["name"] + + temp_domain_dict["domain_records"] = rest.get_paginated_data( + base_url=base_url, data_key_name="domain_records" + ) + domain_results.append(temp_domain_dict) + + module.exit_json(changed=False, data=domain_results) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + domain_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_domain_facts", + "community.digitalocean.digital_ocean_domain_facts", + ): + module.deprecate( + "The 'digital_ocean_domain_facts' module has been renamed to 'digital_ocean_domain_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record.py new file mode 100644 index 00000000..05bc4a45 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record.py @@ -0,0 +1,508 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: digital_ocean_domain_record +author: "Adam Papai (@woohgit)" +version_added: 1.1.0 +short_description: Manage DigitalOcean domain records +description: + - Create/delete a domain record in DigitalOcean. +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: [ present, absent ] + type: str + record_id: + description: + - Used with C(force_update=yes) and C(state='absent') to update or delete a specific record. + type: int + force_update: + description: + - If there is already a record with the same C(name) and C(type) force update it. + default: false + type: bool + domain: + description: + - Name of the domain. + required: true + type: str + type: + description: + - The type of record you would like to create. + choices: [ A, AAAA, CNAME, MX, TXT, SRV, NS, CAA ] + type: str + data: + description: + - This is the value of the record, depending on the record type. + default: "" + type: str + name: + description: + - Required for C(A, AAAA, CNAME, TXT, SRV) records. The host name, alias, or service being defined by the record. + default: "@" + type: str + priority: + description: + - The priority of the host for C(SRV, MX) records). + type: int + port: + description: + - The port that the service is accessible on for SRV records only. + type: int + weight: + description: + - The weight of records with the same priority for SRV records only. + type: int + ttl: + description: + - Time to live for the record, in seconds. + default: 1800 + type: int + flags: + description: + - An unsignedinteger between 0-255 used for CAA records. + type: int + tag: + description: + - The parameter tag for CAA records. + choices: [ issue, wildissue, iodef ] + type: str + oauth_token: + description: + - DigitalOcean OAuth token. 
Can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + aliases: ['API_TOKEN'] + type: str + +notes: + - Version 2 of DigitalOcean API is used. + - The number of requests that can be made through the API is currently limited to 5,000 per hour per OAuth token. +""" + +EXAMPLES = """ +- name: Create default A record for example.com + community.digitalocean.digital_ocean_domain_record: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: A + name: "@" + data: 127.0.0.1 + +- name: Create A record for www + community.digitalocean.digital_ocean_domain_record: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: A + name: www + data: 127.0.0.1 + +- name: Update A record for www based on name/type/data + community.digitalocean.digital_ocean_domain_record: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: A + name: www + data: 127.0.0.2 + force_update: yes + +- name: Update A record for www based on record_id + community.digitalocean.digital_ocean_domain_record: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + record_id: 123456 + type: A + name: www + data: 127.0.0.2 + force_update: yes + +- name: Remove www record based on name/type/data + community.digitalocean.digital_ocean_domain_record: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: A + name: www + data: 127.0.0.1 + +- name: Remove www record based on record_id + community.digitalocean.digital_ocean_domain_record: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + record_id: 1234567 + +- name: Create MX record with priority 10 for example.com + community.digitalocean.digital_ocean_domain_record: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: MX + data: mail1.example.com + priority: 10 +""" + +RETURN = r""" +data: + description: a DigitalOcean Domain Record + returned: success + type: dict + sample: { + "id": 3352896, + "type": "CNAME", + "name": "www", + "data": "192.168.0.1", + "priority": 10, + "port": 5556, + "ttl": 3600, + "weight": 10, + "flags": 16, + "tag": "issue" + } +""" + + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DigitalOceanDomainRecordManager(DigitalOceanHelper, object): + def __init__(self, module): + super(DigitalOceanDomainRecordManager, self).__init__(module) + self.module = module + self.domain = module.params.get("domain").lower() + self.records = self.__get_all_records() + self.payload = self.__build_payload() + self.force_update = module.params.get("force_update", False) + self.record_id = module.params.get("record_id", None) + + def check_credentials(self): + # Check if oauth_token is valid or not + response = self.get("account") + if response.status_code == 401: + self.module.fail_json( + msg="Failed to login using oauth_token, please verify validity of oauth_token" + ) + + def verify_domain(self): + # URL https://api.digitalocean.com/v2/domains/[NAME] + response = self.get("domains/%s" % self.domain) + status_code = response.status_code + json = 
response.json + + if status_code not in (200, 404): + self.module.fail_json( + msg="Error getting domain [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + elif status_code == 404: + self.module.fail_json( + msg="No domain named '%s' found. Please create a domain first" + % self.domain + ) + + def __get_all_records(self): + + records = [] + page = 1 + while True: + # GET /v2/domains/$DOMAIN_NAME/records + response = self.get( + "domains/%(domain)s/records?page=%(page)s" + % {"domain": self.domain, "page": page} + ) + status_code = response.status_code + json = response.json + + if status_code != 200: + self.module.fail_json( + msg="Error getting domain records [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + + for record in json["domain_records"]: + records.append(dict([(str(k), v) for k, v in record.items()])) + + if "pages" in json["links"] and "next" in json["links"]["pages"]: + page += 1 + else: + break + + return records + + def __normalize_data(self): + # for the MX, CNAME, SRV, CAA records make sure the data ends with a dot + if ( + self.payload["type"] in ["CNAME", "MX", "SRV", "CAA"] + and self.payload["data"] != "@" + and not self.payload["data"].endswith(".") + ): + data = "%s." % self.payload["data"] + else: + data = self.payload["data"] + + return data + + def __find_record_by_id(self, record_id): + for record in self.records: + if record["id"] == record_id: + return record + return None + + def __get_matching_records(self): + """Collect exact and similar records + + It returns an exact record if there is any match along with the record_id. + It also returns multiple records if there is no exact match + """ + + # look for exactly the same record used by (create, delete) + for record in self.records: + r = dict(record) + del r["id"] + # python3 does not have cmp so let's use the official workaround + if r == self.payload: + return r, record["id"], None + + # look for similar records used by (update) + similar_records = [] + for record in self.records: + if ( + record["type"] == self.payload["type"] + and record["name"] == self.payload["name"] + ): + similar_records.append(record) + + if similar_records: + return None, None, similar_records + + # if no exact neither similar records + return None, None, None + + def __create_record(self): + # before data comparison, we need to make sure that + # the payload['data'] is not normalized, but + # during create/update digitalocean expects normalized data + self.payload["data"] = self.__normalize_data() + + # POST /v2/domains/$DOMAIN_NAME/records + response = self.post("domains/%s/records" % self.domain, data=self.payload) + status_code = response.status_code + json = response.json + if status_code == 201: + changed = True + return changed, json["domain_record"] + else: + self.module.fail_json( + msg="Error creating domain record [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + + def create_or_update_record(self): + + # if record_id is given we need to update the record no matter what + if self.record_id: + changed, result = self.__update_record(self.record_id) + return changed, result + + record, record_id, similar_records = self.__get_matching_records() + + # create the record if no similar or exact record were found + if not record and not similar_records: + changed, result = self.__create_record() + return changed, result + + # no exact match, but we have similar records + # so if force_update == True we should update it + if not record 
and similar_records: + # if we have 1 similar record + if len(similar_records) == 1: + # update if we were told to do it so + if self.force_update: + record_id = similar_records[0]["id"] + changed, result = self.__update_record(record_id) + # if no update was given, create it + else: + changed, result = self.__create_record() + return changed, result + # we have multiple similar records, bun not exact match + else: + # we have multiple similar records, can't decide what to do + if self.force_update: + self.module.fail_json( + msg="Can't update record, too many similar records: %s" + % similar_records + ) + # create it + else: + changed, result = self.__create_record() + return changed, result + # record matches + else: + changed = False + result = "Record has been already created" + return changed, result + + def __update_record(self, record_id): + # before data comparison, we need to make sure that + # the payload['data'] is not normalized, but + # during create/update digitalocean expects normalized data + self.payload["data"] = self.__normalize_data() + + # double check if the record exist + record = self.__find_record_by_id(record_id) + + # record found + if record: + # PUT /v2/domains/$DOMAIN_NAME/records/$RECORD_ID + response = self.put( + "domains/%(domain)s/records/%(record_id)s" + % {"domain": self.domain, "record_id": record_id}, + data=self.payload, + ) + status_code = response.status_code + json = response.json + if status_code == 200: + changed = True + return changed, json["domain_record"] + else: + self.module.fail_json( + msg="Error updating domain record [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + # recond not found + else: + self.module.fail_json( + msg="Error updating domain record. Record does not exist. [%s]" + % record_id + ) + + def __build_payload(self): + + payload = dict( + data=self.module.params.get("data"), + flags=self.module.params.get("flags"), + name=self.module.params.get("name"), + port=self.module.params.get("port"), + priority=self.module.params.get("priority"), + type=self.module.params.get("type"), + tag=self.module.params.get("tag"), + ttl=self.module.params.get("ttl"), + weight=self.module.params.get("weight"), + ) + + # DigitalOcean stores every data in lowercase except TXT + if payload["type"] != "TXT" and payload["data"]: + payload["data"] = payload["data"].lower() + + # digitalocean stores data: '@' if the data=domain + if payload["data"] == self.domain: + payload["data"] = "@" + + return payload + + def delete_record(self): + + # if record_id is given, try to find the record based on the id + if self.record_id: + record = self.__find_record_by_id(self.record_id) + record_id = self.record_id + # if no record_id is given, try to a single matching record + else: + record, record_id, similar_records = self.__get_matching_records() + if not record and similar_records: + if len(similar_records) == 1: + record, record_id = similar_records[0], similar_records[0]["id"] + else: + self.module.fail_json( + msg="Can't delete record, too many similar records: %s" + % similar_records + ) + # record was not found, we're done + if not record: + changed = False + return changed, record + # record found, lets delete it + else: + # DELETE /v2/domains/$DOMAIN_NAME/records/$RECORD_ID. 
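+            # A successful record deletion is answered with HTTP 204; any other
+            # status code is reported back to the caller via fail_json below.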
+ response = self.delete( + "domains/%(domain)s/records/%(id)s" + % {"domain": self.domain, "id": record_id} + ) + status_code = response.status_code + json = response.json + if status_code == 204: + changed = True + msg = "Successfully deleted %s" % record["name"] + return changed, msg + else: + self.module.fail_json( + msg="Error deleting domain record. [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=["present", "absent"], default="present"), + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + ), + force_update=dict(type="bool", default=False), + record_id=dict(type="int"), + domain=dict(type="str", required=True), + type=dict(choices=["A", "AAAA", "CNAME", "MX", "TXT", "SRV", "NS", "CAA"]), + name=dict(type="str", default="@"), + data=dict(type="str"), + priority=dict(type="int"), + port=dict(type="int"), + weight=dict(type="int"), + ttl=dict(type="int", default=1800), + tag=dict(choices=["issue", "wildissue", "iodef"]), + flags=dict(type="int"), + ), + # TODO + # somehow define the absent requirements: record_id OR ('name', 'type', 'data') + required_if=[("state", "present", ("type", "name", "data"))], + ) + + manager = DigitalOceanDomainRecordManager(module) + + # verify credentials and domain + manager.check_credentials() + manager.verify_domain() + + state = module.params.get("state") + + if state == "present": + changed, result = manager.create_or_update_record() + elif state == "absent": + changed, result = manager.delete_record() + + module.exit_json(changed=changed, result=result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record_info.py new file mode 100644 index 00000000..b42a7aaa --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_domain_record_info.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_domain_record_info +short_description: Gather information about DigitalOcean domain records +description: + - Gather information about DigitalOcean domain records. +version_added: 1.16.0 +author: + - "Adam Papai (@woohgit)" + - Mark Mercado (@mamercad) +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ["present"] + type: str + name: + description: + - Name of the domain. + required: true + type: str + aliases: ["domain", "domain_name"] + record_id: + description: + - Used to retrieve a specific record. + type: int + type: + description: + - The type of record you would like to retrieve. + choices: ["A", "AAAA", "CNAME", "MX", "TXT", "SRV", "NS", "CAA"] + type: str +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +notes: + - Version 2 of DigitalOcean API is used. + - The number of requests that can be made through the API is currently limited to 5,000 per hour per OAuth token. 
+""" + +EXAMPLES = r""" +- name: Retrieve all domain records for example.com + community.digitalocean.digital_ocean_domain_record_info: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + +- name: Get specific domain record by ID + community.digitalocean.digital_ocean_domain_record_info: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + record_id: 12345789 + register: result + +- name: Retrieve all A domain records for example.com + community.digitalocean.digital_ocean_domain_record_info: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + domain: example.com + type: A +""" + +RETURN = r""" +data: + description: list of DigitalOcean domain records + returned: success + type: list + elements: dict + sample: + - data: ns1.digitalocean.com + flags: null + id: 296972269 + name: '@' + port: null + priority: null + tag: null + ttl: 1800 + type: NS + weight: null +""" + + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DigitalOceanDomainRecordManager(DigitalOceanHelper, object): + def __init__(self, module): + super(DigitalOceanDomainRecordManager, self).__init__(module) + self.module = module + self.domain = module.params.get("name").lower() + self.records = self.__get_all_records() + self.payload = self.__build_payload() + self.force_update = module.params.get("force_update", False) + self.record_id = module.params.get("record_id", None) + self.records_by_id = self.__find_record_by_id(self.record_id) + + def check_credentials(self): + # Check if oauth_token is valid or not + response = self.get("account") + if response.status_code == 401: + self.module.fail_json( + msg="Failed to login using oauth_token, please verify validity of oauth_token" + ) + + def __get_all_records(self): + + records = [] + page = 1 + while True: + # GET /v2/domains/$DOMAIN_NAME/records + type = self.module.params.get("type") + if type: + response = self.get( + "domains/%(domain)s/records?type=%(type)s&page=%(page)s" + % {"domain": self.domain, "type": type, "page": page} + ) + else: + response = self.get( + "domains/%(domain)s/records?page=%(page)s" + % {"domain": self.domain, "page": page} + ) + status_code = response.status_code + json = response.json + + if status_code != 200: + self.module.exit_json( + msg="Error getting domain records [%(status_code)s: %(json)s]" + % {"status_code": status_code, "json": json} + ) + + domain_records = json.get("domain_records", []) + for record in domain_records: + records.append(dict([(str(k), v) for k, v in record.items()])) + + links = json.get("links") + if links: + pages = links.get("pages") + if pages: + if "next" in pages: + page += 1 + else: + break + else: + break + else: + break + + return records + + def get_records(self): + return False, self.records + + def get_records_by_id(self): + if self.records_by_id: + return False, [self.records_by_id] + else: + return False, [] + + def __find_record_by_id(self, record_id): + for record in self.records: + if record["id"] == record_id: + return record + return None + + def __build_payload(self): + + payload = dict( + name=self.module.params.get("name"), + type=self.module.params.get("type"), + ) + + payload_data = payload.get("data") + if payload_data: + # digitalocean stores data: '@' if the data=domain + if 
payload["data"] == self.domain: + payload["data"] = "@" + + return payload + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present"], default="present"), + name=dict(type="str", aliases=["domain", "domain_name"], required=True), + record_id=dict(type="int"), + type=dict( + type="str", + choices=["A", "AAAA", "CNAME", "MX", "TXT", "SRV", "NS", "CAA"], + ), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + manager = DigitalOceanDomainRecordManager(module) + + # verify credentials and domain + manager.check_credentials() + + state = module.params.get("state") + record_id = module.params.get("record_id") + + if state == "present": + if record_id: + changed, result = manager.get_records_by_id() + else: + changed, result = manager.get_records() + module.exit_json(changed=changed, data={"records": result}) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet.py new file mode 100644 index 00000000..791f2891 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet.py @@ -0,0 +1,918 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_droplet +short_description: Create and delete a DigitalOcean droplet +description: + - Create and delete a droplet in DigitalOcean and optionally wait for it to be active. +author: + - Gurchet Rai (@gurch101) + - Mark Mercado (@mamercad) +options: + state: + description: + - Indicate desired state of the target. + - C(present) will create the named droplet; be mindful of the C(unique_name) parameter. + - C(absent) will delete the named droplet, if it exists. + - C(active) will create the named droplet (unless it exists) and ensure that it is powered on. + - C(inactive) will create the named droplet (unless it exists) and ensure that it is powered off. + default: present + choices: ["present", "absent", "active", "inactive"] + type: str + id: + description: + - The Droplet ID you want to operate on. + aliases: ["droplet_id"] + type: int + name: + description: + - This is the name of the Droplet. + - Must be formatted by hostname rules. + type: str + unique_name: + description: + - Require unique hostnames. + - By default, DigitalOcean allows multiple hosts with the same name. + - Setting this to C(true) allows only one host per name. + - Useful for idempotence. + default: false + type: bool + size: + description: + - This is the slug of the size you would like the Droplet created with. + - Please see U(https://slugs.do-api.dev/) for current slugs. + aliases: ["size_id"] + type: str + image: + description: + - This is the slug of the image you would like the Droplet created with. + aliases: ["image_id"] + type: str + region: + description: + - This is the slug of the region you would like your Droplet to be created in. + aliases: ["region_id"] + type: str + ssh_keys: + description: + - Array of SSH key fingerprints that you would like to be added to the Droplet. 
+ required: false + type: list + elements: str + firewall: + description: + - Array of firewall names to apply to the Droplet. + - Omitting a firewall name that is currently applied to a droplet will remove it. + required: false + type: list + elements: str + private_networking: + description: + - Add an additional, private network interface to the Droplet (for inter-Droplet communication). + default: false + type: bool + vpc_uuid: + description: + - A string specifying the UUID of the VPC to which the Droplet will be assigned. + - If excluded, the Droplet will be assigned to the account's default VPC for the region. + type: str + version_added: 0.1.0 + user_data: + description: + - Opaque blob of data which is made available to the Droplet. + required: False + type: str + ipv6: + description: + - Enable IPv6 for the Droplet. + required: false + default: false + type: bool + wait: + description: + - Wait for the Droplet to be active before returning. + - If wait is C(false) an IP address may not be returned. + required: false + default: true + type: bool + wait_timeout: + description: + - How long before C(wait) gives up, in seconds, when creating a Droplet. + default: 120 + type: int + backups: + description: + - Indicates whether automated backups should be enabled. + required: false + default: false + type: bool + monitoring: + description: + - Indicates whether to install the DigitalOcean agent for monitoring. + required: false + default: false + type: bool + tags: + description: + - A list of tag names as strings to apply to the Droplet after it is created. + - Tag names can either be existing or new tags. + required: false + type: list + elements: str + volumes: + description: + - A list including the unique string identifier for each Block Storage volume to be attached to the Droplet. + required: False + type: list + elements: str + resize_disk: + description: + - Whether to increase disk size on resize. + - Only consulted if the C(unique_name) is C(true). + - Droplet C(size) must dictate an increase. + required: false + default: false + type: bool + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when creating. + type: str + required: false + default: "" + sleep_interval: + description: + - How long to C(sleep) in between action and status checks. + - Default is 10 seconds; this should be less than C(wait_timeout) and nonzero. + default: 10 + type: int +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Create a new Droplet + community.digitalocean.digital_ocean_droplet: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: mydroplet + size: s-1vcpu-1gb + region: sfo3 + image: ubuntu-20-04-x64 + wait_timeout: 500 + ssh_keys: [ .... 
] + register: my_droplet + +- name: Show Droplet info + ansible.builtin.debug: + msg: | + Droplet ID is {{ my_droplet.data.droplet.id }} + First Public IPv4 is {{ (my_droplet.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address | default('<none>', true) }} + First Private IPv4 is {{ (my_droplet.data.droplet.networks.v4 | selectattr('type', 'equalto', 'private')).0.ip_address | default('<none>', true) }} + +- name: Create a new Droplet (and assign to Project "test") + community.digitalocean.digital_ocean_droplet: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: mydroplet + size: s-1vcpu-1gb + region: sfo3 + image: ubuntu-20-04-x64 + wait_timeout: 500 + ssh_keys: [ .... ] + project: test + register: my_droplet + +- name: Ensure a Droplet is present + community.digitalocean.digital_ocean_droplet: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + id: 123 + name: mydroplet + size: s-1vcpu-1gb + region: sfo3 + image: ubuntu-20-04-x64 + wait_timeout: 500 + +- name: Ensure a Droplet is present and has firewall rules applied + community.digitalocean.digital_ocean_droplet: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + id: 123 + name: mydroplet + size: s-1vcpu-1gb + region: sfo3 + image: ubuntu-20-04-x64 + firewall: ['myfirewall', 'anotherfirewall'] + wait_timeout: 500 + +- name: Ensure a Droplet is present with SSH keys installed + community.digitalocean.digital_ocean_droplet: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + id: 123 + name: mydroplet + size: s-1vcpu-1gb + region: sfo3 + ssh_keys: ['1534404', '1784768'] + image: ubuntu-20-04-x64 + wait_timeout: 500 +""" + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplets +data: + description: a DigitalOcean Droplet + returned: changed + type: dict + sample: + ip_address: 104.248.118.172 + ipv6_address: 2604:a880:400:d1::90a:6001 + private_ipv4_address: 10.136.122.141 + droplet: + id: 3164494 + name: example.com + memory: 512 + vcpus: 1 + disk: 20 + locked: true + status: new + kernel: + id: 2233 + name: Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic + version: 3.13.0-37-generic + created_at: "2014-11-14T16:36:31Z" + features: ["virtio"] + backup_ids: [] + snapshot_ids: [] + image: {} + volume_ids: [] + size: {} + size_slug: 512mb + networks: {} + region: {} + tags: ["web"] +msg: + description: Informational or error message encountered during execution + returned: changed + type: str + sample: No project named test2 found +assign_status: + description: Assignment status (ok, not_found, assigned, already_assigned, service_down) + returned: changed + type: str + sample: assigned +resources: + description: Resource assignment involved in project assignment + returned: changed + type: dict + sample: + assigned_at: '2021-10-25T17:39:38Z' + links: + self: https://api.digitalocean.com/v2/droplets/3164494 + status: assigned + urn: do:droplet:3164494 +""" + +import time +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, + DigitalOceanProjects, +) + + +class DODroplet(object): + + failure_message = { + "empty_response": "Empty response from the DigitalOcean API; please try again or open a bug if it never " + "succeeds.", + "resizing_off": "Droplet must be off prior to resizing: " + 
"https://docs.digitalocean.com/reference/api/api-reference/#operation/post_droplet_action", + "unexpected": "Unexpected error [{0}]; please file a bug: " + "https://github.com/ansible-collections/community.digitalocean/issues", + "support_action": "Error status on Droplet action [{0}], please try again or contact DigitalOcean support: " + "https://docs.digitalocean.com/support/", + "failed_to": "Failed to {0} {1} [HTTP {2}: {3}]", + } + + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + self.wait = self.module.params.pop("wait", True) + self.wait_timeout = self.module.params.pop("wait_timeout", 120) + self.unique_name = self.module.params.pop("unique_name", False) + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.id = None + self.name = None + self.size = None + self.status = None + if self.module.params.get("project"): + # only load for non-default project assignments + self.projects = DigitalOceanProjects(module, self.rest) + self.firewalls = self.get_firewalls() + self.sleep_interval = self.module.params.pop("sleep_interval", 10) + if self.wait: + if self.sleep_interval > self.wait_timeout: + self.module.fail_json( + msg="Sleep interval {0} should be less than {1}".format( + self.sleep_interval, self.wait_timeout + ) + ) + if self.sleep_interval <= 0: + self.module.fail_json( + msg="Sleep interval {0} should be greater than zero".format( + self.sleep_interval + ) + ) + + def get_firewalls(self): + response = self.rest.get("firewalls") + status_code = response.status_code + json_data = response.json + if status_code != 200: + self.module.fail_json(msg="Failed to get firewalls", data=json_data) + + return self.rest.get_paginated_data( + base_url="firewalls?", data_key_name="firewalls" + ) + + def get_firewall_by_name(self): + rule = {} + item = 0 + for firewall in self.firewalls: + for firewall_name in self.module.params["firewall"]: + if firewall_name in firewall["name"]: + rule[item] = {} + rule[item].update(firewall) + item += 1 + if len(rule) > 0: + return rule + return None + + def add_droplet_to_firewalls(self): + changed = False + rule = self.get_firewall_by_name() + if rule is None: + err = "Failed to find firewalls: {0}".format(self.module.params["firewall"]) + return err + json_data = self.get_droplet() + if json_data is not None: + request_params = {} + droplet = json_data.get("droplet", None) + droplet_id = droplet.get("id", None) + request_params["droplet_ids"] = [droplet_id] + for firewall in rule: + if droplet_id not in rule[firewall]["droplet_ids"]: + response = self.rest.post( + "firewalls/{0}/droplets".format(rule[firewall]["id"]), + data=request_params, + ) + json_data = response.json + status_code = response.status_code + if status_code != 204: + err = "Failed to add droplet {0} to firewall {1}".format( + droplet_id, rule[firewall]["id"] + ) + return err, changed + changed = True + return None, changed + + def remove_droplet_from_firewalls(self): + changed = False + json_data = self.get_droplet() + if json_data is not None: + request_params = {} + droplet = json_data.get("droplet", None) + droplet_id = droplet.get("id", None) + request_params["droplet_ids"] = [droplet_id] + for firewall in self.firewalls: + if ( + firewall["name"] not in self.module.params["firewall"] + and droplet_id in firewall["droplet_ids"] + ): + response = self.rest.delete( + "firewalls/{0}/droplets".format(firewall["id"]), + data=request_params, + ) + json_data = response.json + status_code 
= response.status_code + if status_code != 204: + err = "Failed to remove droplet {0} from firewall {1}".format( + droplet_id, firewall["id"] + ) + return err, changed + changed = True + return None, changed + + def get_by_id(self, droplet_id): + if not droplet_id: + return None + response = self.rest.get("droplets/{0}".format(droplet_id)) + status_code = response.status_code + json_data = response.json + if json_data is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["empty_response"], + ) + else: + if status_code == 200: + droplet = json_data.get("droplet", None) + if droplet is not None: + self.id = droplet.get("id", None) + self.name = droplet.get("name", None) + self.size = droplet.get("size_slug", None) + self.status = droplet.get("status", None) + return json_data + return None + + def get_by_name(self, droplet_name): + if not droplet_name: + return None + page = 1 + while page is not None: + response = self.rest.get("droplets?page={0}".format(page)) + json_data = response.json + status_code = response.status_code + if json_data is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["empty_response"], + ) + else: + if status_code == 200: + droplets = json_data.get("droplets", []) + for droplet in droplets: + if droplet.get("name", None) == droplet_name: + self.id = droplet.get("id", None) + self.name = droplet.get("name", None) + self.size = droplet.get("size_slug", None) + self.status = droplet.get("status", None) + return {"droplet": droplet} + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def get_addresses(self, data): + """Expose IP addresses as their own property allowing users extend to additional tasks""" + _data = data + for k, v in data.items(): + setattr(self, k, v) + networks = _data["droplet"]["networks"] + for network in networks.get("v4", []): + if network["type"] == "public": + _data["ip_address"] = network["ip_address"] + else: + _data["private_ipv4_address"] = network["ip_address"] + for network in networks.get("v6", []): + if network["type"] == "public": + _data["ipv6_address"] = network["ip_address"] + else: + _data["private_ipv6_address"] = network["ip_address"] + return _data + + def get_droplet(self): + json_data = self.get_by_id(self.module.params["id"]) + if not json_data and self.unique_name: + json_data = self.get_by_name(self.module.params["name"]) + return json_data + + def resize_droplet(self, state, droplet_id): + if self.status != "off": + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["resizing_off"], + ) + + self.wait_action( + droplet_id, + { + "type": "resize", + "disk": self.module.params["resize_disk"], + "size": self.module.params["size"], + }, + ) + + if state == "active": + self.ensure_power_on(droplet_id) + + # Get updated Droplet data + json_data = self.get_droplet() + droplet = json_data.get("droplet", None) + if droplet is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format("no Droplet"), + ) + + self.module.exit_json( + changed=True, + msg="Resized Droplet {0} ({1}) from {2} to {3}".format( + self.name, self.id, self.size, self.module.params["size"] + ), + data={"droplet": droplet}, + ) + + def wait_status(self, droplet_id, desired_statuses): + # Make sure Droplet is active first + end_time = time.monotonic() + self.wait_timeout + while time.monotonic() < end_time: + response = 
self.rest.get("droplets/{0}".format(droplet_id)) + json_data = response.json + status_code = response.status_code + message = json_data.get("message", "no error message") + droplet = json_data.get("droplet", None) + droplet_status = droplet.get("status", None) if droplet else None + + if droplet is None or droplet_status is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format( + "no Droplet or status" + ), + ) + + if status_code >= 400: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["failed_to"].format( + "get", "Droplet", status_code, message + ), + ) + + if droplet_status in desired_statuses: + return + + time.sleep(self.sleep_interval) + + self.module.fail_json( + msg="Wait for Droplet [{0}] status timeout".format( + ",".join(desired_statuses) + ) + ) + + def wait_check_action(self, droplet_id, action_id): + end_time = time.monotonic() + self.wait_timeout + while time.monotonic() < end_time: + response = self.rest.get( + "droplets/{0}/actions/{1}".format(droplet_id, action_id) + ) + json_data = response.json + status_code = response.status_code + message = json_data.get("message", "no error message") + action = json_data.get("action", None) + action_id = action.get("id", None) + action_status = action.get("status", None) + + if action is None or action_id is None or action_status is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format( + "no action, ID, or status" + ), + ) + + if status_code >= 400: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["failed_to"].format( + "get", "action", status_code, message + ), + ) + + if action_status == "errored": + self.module.fail_json( + changed=True, + msg=DODroplet.failure_message["support_action"].format(action_id), + ) + + if action_status == "completed": + return + + time.sleep(self.sleep_interval) + + self.module.fail_json(msg="Wait for Droplet action timeout") + + def wait_action(self, droplet_id, desired_action_data): + action_type = desired_action_data.get("type", "undefined") + + response = self.rest.post( + "droplets/{0}/actions".format(droplet_id), data=desired_action_data + ) + json_data = response.json + status_code = response.status_code + message = json_data.get("message", "no error message") + action = json_data.get("action", None) + action_id = action.get("id", None) + action_status = action.get("status", None) + + if action is None or action_id is None or action_status is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format( + "no action, ID, or status" + ), + ) + + if status_code >= 400: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["failed_to"].format( + "post", "action", status_code, message + ), + ) + + # Keep checking till it is done or times out + self.wait_check_action(droplet_id, action_id) + + def ensure_power_on(self, droplet_id): + # Make sure Droplet is active or off first + self.wait_status(droplet_id, ["active", "off"]) + # Trigger power-on + self.wait_action(droplet_id, {"type": "power_on"}) + + def ensure_power_off(self, droplet_id): + # Make sure Droplet is active first + self.wait_status(droplet_id, ["active"]) + # Trigger power-off + self.wait_action(droplet_id, {"type": "power_off"}) + + def create(self, state): + json_data = self.get_droplet() + # We have the Droplet + if json_data is not None: + droplet = json_data.get("droplet", None) + droplet_id = droplet.get("id", None) + 
droplet_size = droplet.get("size_slug", None) + + if droplet_id is None or droplet_size is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format( + "no Droplet ID or size" + ), + ) + + # Add droplet to a firewall if specified + if self.module.params["firewall"] is not None: + firewall_changed = False + if len(self.module.params["firewall"]) > 0: + firewall_add, add_changed = self.add_droplet_to_firewalls() + if firewall_add is not None: + self.module.fail_json( + changed=False, + msg=firewall_add, + data={"droplet": droplet, "firewall": firewall_add}, + ) + firewall_changed = firewall_changed or add_changed + firewall_remove, remove_changed = self.remove_droplet_from_firewalls() + if firewall_remove is not None: + self.module.fail_json( + changed=False, + msg=firewall_remove, + data={"droplet": droplet, "firewall": firewall_remove}, + ) + firewall_changed = firewall_changed or remove_changed + self.module.exit_json( + changed=firewall_changed, + data={"droplet": droplet}, + ) + + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=False) + + # Ensure Droplet size + if droplet_size != self.module.params.get("size", None): + self.resize_droplet(state, droplet_id) + + # Ensure Droplet power state + droplet_data = self.get_addresses(json_data) + droplet_id = droplet.get("id", None) + droplet_status = droplet.get("status", None) + if droplet_id is not None and droplet_status is not None: + if state == "active" and droplet_status != "active": + self.ensure_power_on(droplet_id) + # Get updated Droplet data (fallback to current data) + json_data = self.get_droplet() + droplet = json_data.get("droplet", droplet) + self.module.exit_json(changed=True, data={"droplet": droplet}) + elif state == "inactive" and droplet_status != "off": + self.ensure_power_off(droplet_id) + # Get updated Droplet data (fallback to current data) + json_data = self.get_droplet() + droplet = json_data.get("droplet", droplet) + self.module.exit_json(changed=True, data={"droplet": droplet}) + else: + self.module.exit_json(changed=False, data={"droplet": droplet}) + + # We don't have the Droplet, create it + + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + request_params = dict(self.module.params) + del request_params["id"] + + response = self.rest.post("droplets", data=request_params) + json_data = response.json + status_code = response.status_code + message = json_data.get("message", "no error message") + droplet = json_data.get("droplet", None) + + # Ensure that the Droplet is created + if status_code != 202: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["failed_to"].format( + "create", "Droplet", status_code, message + ), + ) + + droplet_id = droplet.get("id", None) + if droplet is None or droplet_id is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format("no Droplet or ID"), + ) + + if status_code >= 400: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["failed_to"].format( + "create", "Droplet", status_code, message + ), + ) + + if self.wait: + if state == "present" or state == "active": + self.ensure_power_on(droplet_id) + if state == "inactive": + self.ensure_power_off(droplet_id) + else: + if state == "inactive": + self.ensure_power_off(droplet_id) + + # Get updated Droplet data (fallback to current data) + if self.wait: + json_data = self.get_by_id(droplet_id) + if json_data: + droplet = json_data.get("droplet", 
droplet) + + project_name = self.module.params.get("project") + if project_name: # empty string is the default project, skip project assignment + urn = "do:droplet:{0}".format(droplet_id) + assign_status, error_message, resources = self.projects.assign_to_project( + project_name, urn + ) + self.module.exit_json( + changed=True, + data={"droplet": droplet}, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + # Add droplet to firewall if specified + if self.module.params["firewall"] is not None: + # raise Exception(self.module.params["firewall"]) + firewall_add = self.add_droplet_to_firewalls() + if firewall_add is not None: + self.module.fail_json( + changed=False, + msg=firewall_add, + data={"droplet": droplet, "firewall": firewall_add}, + ) + firewall_remove = self.remove_droplet_from_firewalls() + if firewall_remove is not None: + self.module.fail_json( + changed=False, + msg=firewall_remove, + data={"droplet": droplet, "firewall": firewall_remove}, + ) + self.module.exit_json(changed=True, data={"droplet": droplet}) + + self.module.exit_json(changed=True, data={"droplet": droplet}) + + def delete(self): + # to delete a droplet we need to know the droplet id or unique name, ie + # name is not None and unique_name is True, but as "id or name" is + # enforced elsewhere, we only need to enforce "id or unique_name" here + if not self.module.params["id"] and not self.unique_name: + self.module.fail_json( + changed=False, + msg="id must be set or unique_name must be true for deletes", + ) + json_data = self.get_droplet() + if json_data is None: + self.module.exit_json(changed=False, msg="Droplet not found") + + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Delete it + droplet = json_data.get("droplet", None) + droplet_id = droplet.get("id", None) + droplet_name = droplet.get("name", None) + + if droplet is None or droplet_id is None: + self.module.fail_json( + changed=False, + msg=DODroplet.failure_message["unexpected"].format( + "no Droplet, name, or ID" + ), + ) + + response = self.rest.delete("droplets/{0}".format(droplet_id)) + json_data = response.json + status_code = response.status_code + if status_code == 204: + self.module.exit_json( + changed=True, + msg="Droplet {0} ({1}) deleted".format(droplet_name, droplet_id), + ) + else: + self.module.fail_json( + changed=False, + msg="Failed to delete Droplet {0} ({1})".format( + droplet_name, droplet_id + ), + ) + + +def core(module): + state = module.params.pop("state") + droplet = DODroplet(module) + if state in ["present", "active", "inactive"]: + droplet.create(state) + elif state == "absent": + droplet.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict( + choices=["present", "absent", "active", "inactive"], default="present" + ), + name=dict(type="str"), + size=dict(aliases=["size_id"]), + image=dict(aliases=["image_id"]), + region=dict(aliases=["region_id"]), + ssh_keys=dict(type="list", elements="str", no_log=False), + private_networking=dict(type="bool", default=False), + vpc_uuid=dict(type="str"), + backups=dict(type="bool", default=False), + monitoring=dict(type="bool", default=False), + id=dict(aliases=["droplet_id"], type="int"), + user_data=dict(default=None), + ipv6=dict(type="bool", default=False), + volumes=dict(type="list", elements="str"), + tags=dict(type="list", elements="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=120, type="int"), + 
unique_name=dict(type="bool", default=False), + resize_disk=dict(type="bool", default=False), + project_name=dict(type="str", aliases=["project"], required=False, default=""), + firewall=dict(type="list", elements="str", default=None), + sleep_interval=dict(default=10, type="int"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(["id", "name"],), + required_if=( + [ + ("state", "present", ["name", "size", "image", "region"]), + ("state", "active", ["name", "size", "image", "region"]), + ("state", "inactive", ["name", "size", "image", "region"]), + ] + ), + supports_check_mode=True, + ) + + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet_info.py new file mode 100644 index 00000000..474b9af2 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_droplet_info.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2020, Tyler Auerbeck <tauerbec@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_droplet_info +short_description: Gather information about DigitalOcean Droplets +description: + - This module can be used to gather information about Droplets. +author: "Tyler Auerbeck (@tylerauerbeck)" +version_added: 1.4.0 + +options: + id: + description: + - Droplet ID that can be used to identify and reference a droplet. + type: str + name: + description: + - Droplet name that can be used to identify and reference a droplet. 
+ type: str + +extends_documentation_fragment: +- community.digitalocean.digital_ocean +""" + + +EXAMPLES = r""" +- name: Gather information about all droplets + community.digitalocean.digital_ocean_droplet_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about a specific droplet by name + community.digitalocean.digital_ocean_droplet_info: + oauth_token: "{{ oauth_token }}" + name: my-droplet-name + +- name: Gather information about a specific droplet by id + community.digitalocean.digital_ocean_droplet_info: + oauth_token: "{{ oauth_token }}" + id: abc-123-d45 + +- name: Get information about all droplets to loop through + community.digitalocean.digital_ocean_droplet_info: + oauth_token: "{{ oauth_token }}" + register: droplets + +- name: Get number of droplets + set_fact: + droplet_count: "{{ droplets.data | length }}" +""" + +RETURN = r""" +data: + description: "DigitalOcean droplet information" + elements: dict + returned: success + sample: + - backup_ids: [] + created_at: "2021-04-07T00:44:53Z" + disk: 25 + features: + - private_networking + id: 123456789 + image: + created_at: "2020-10-20T08:49:55Z" + description: "Ubuntu 18.04 x86 image" + distribution: Ubuntu + id: 987654321 + min_disk_size: 15 + name: "18.04 (LTS) x64" + public: false + regions: [] + size_gigabytes: 0.34 + slug: ~ + status: retired + tags: [] + type: base + kernel: ~ + locked: false + memory: 1024 + name: my-droplet-01 + networks: + v4: + - gateway: "" + ip_address: "1.2.3.4" + netmask: "255.255.240.0" + type: private + - gateway: "5.6.7.8" + ip_address: "4.3.2.1" + netmask: "255.255.240.0" + type: public + v6: [] + next_backup_window: ~ + region: + available: true + features: + - backups + - ipv6 + - metadata + - install_agent + - storage + - image_transfer + name: "New York 1" + sizes: + - s-1vcpu-1gb + - s-1vcpu-1gb-intel + - s-1vcpu-2gb + - s-1vcpu-2gb-intel + - s-2vcpu-2gb + - s-2vcpu-2gb-intel + - s-2vcpu-4gb + - s-2vcpu-4gb-intel + - s-4vcpu-8gb + - c-2 + - c2-2vcpu-4gb + - s-4vcpu-8gb-intel + - g-2vcpu-8gb + - gd-2vcpu-8gb + - s-8vcpu-16gb + - m-2vcpu-16gb + - c-4 + - c2-4vcpu-8gb + - s-8vcpu-16gb-intel + - m3-2vcpu-16gb + - g-4vcpu-16gb + - so-2vcpu-16gb + - m6-2vcpu-16gb + - gd-4vcpu-16gb + - so1_5-2vcpu-16gb + - m-4vcpu-32gb + - c-8 + - c2-8vcpu-16gb + - m3-4vcpu-32gb + - g-8vcpu-32gb + - so-4vcpu-32gb + - m6-4vcpu-32gb + - gd-8vcpu-32gb + - so1_5-4vcpu-32gb + - m-8vcpu-64gb + - c-16 + - c2-16vcpu-32gb + - m3-8vcpu-64gb + - g-16vcpu-64gb + - so-8vcpu-64gb + - m6-8vcpu-64gb + - gd-16vcpu-64gb + - so1_5-8vcpu-64gb + - m-16vcpu-128gb + - c-32 + - c2-32vcpu-64gb + - m3-16vcpu-128gb + - m-24vcpu-192gb + - g-32vcpu-128gb + - so-16vcpu-128gb + - m6-16vcpu-128gb + - gd-32vcpu-128gb + - m3-24vcpu-192gb + - g-40vcpu-160gb + - so1_5-16vcpu-128gb + - m-32vcpu-256gb + - gd-40vcpu-160gb + - so-24vcpu-192gb + - m6-24vcpu-192gb + - m3-32vcpu-256gb + - so1_5-24vcpu-192gb + - so-32vcpu-256gb + - m6-32vcpu-256gb + - so1_5-32vcpu-256gb + slug: nyc1 + size: + available: true + description: Basic + disk: 25 + memory: 1024 + price_hourly: 0.00744 + price_monthly: 5.0 + regions: + - ams2 + - ams3 + - blr1 + - fra1 + - lon1 + - nyc1 + - nyc2 + - nyc3 + - sfo1 + - sfo3 + - sgp1 + - tor1 + slug: s-1vcpu-1gb + transfer: 1.0 + vcpus: 1 + size_slug: s-1vcpu-1gb + snapshot_ids: [] + status: active + tags: + - tag1 + vcpus: 1 + volume_ids: [] + vpc_uuid: 123-abc-567a + type: list +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from 
ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +def run(module): + rest = DigitalOceanHelper(module) + + if module.params["id"]: + path = "droplets/" + module.params["id"] + response = rest.get(path) + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'droplets' information due to error: %s" + % response.json["message"] + ) + else: + response = rest.get_paginated_data( + base_url="droplets?", data_key_name="droplets" + ) + + if module.params["id"]: + data = [response.json["droplet"]] + elif module.params["name"]: + data = [d for d in response if d["name"] == module.params["name"]] + if not data: + module.fail_json( + msg="Failed to fetch 'droplets' information due to error: Unable to find droplet with name %s" + % module.params["name"] + ) + else: + data = response + + module.exit_json(changed=False, data=data) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=False, default=None), + id=dict(type="str", required=False, default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[("id", "name")], + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall.py new file mode 100644 index 00000000..24b7c420 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall.py @@ -0,0 +1,560 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Anthony Bond <ajbond2005@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: digital_ocean_firewall +short_description: Manage cloud firewalls within DigitalOcean +description: + - This module can be used to add or remove firewalls on the DigitalOcean cloud platform. +author: + - Anthony Bond (@BondAnthony) + - Lucas Basquerotto (@lucasbasquerotto) +version_added: "1.1.0" +options: + name: + type: str + description: + - Name of the firewall rule to create or manage + required: true + state: + type: str + choices: ['present', 'absent'] + default: present + description: + - Assert the state of the firewall rule. Set to 'present' to create or update and 'absent' to remove. + droplet_ids: + type: list + elements: str + description: + - List of droplet ids to be assigned to the firewall + required: false + tags: + type: list + elements: str + description: + - List of tags to be assigned to the firewall + required: false + inbound_rules: + type: list + elements: dict + description: + - Firewall rules specifically targeting inbound network traffic into DigitalOcean + required: false + suboptions: + protocol: + type: str + choices: ['udp', 'tcp', 'icmp'] + default: tcp + description: + - Network protocol to be accepted. 
+ required: false + ports: + type: str + description: + - The ports on which traffic will be allowed, single, range, or all + required: true + sources: + type: dict + description: + - Dictionary of locations from which inbound traffic will be accepted + required: true + suboptions: + addresses: + type: list + elements: str + description: + - List of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic + required: false + droplet_ids: + type: list + elements: str + description: + - List of integers containing the IDs of the Droplets to which the firewall will allow traffic + required: false + load_balancer_uids: + type: list + elements: str + description: + - List of strings containing the IDs of the Load Balancers to which the firewall will allow traffic + required: false + tags: + type: list + elements: str + description: + - List of strings containing the names of Tags corresponding to groups of Droplets to + which the Firewall will allow traffic + required: false + outbound_rules: + type: list + elements: dict + description: + - Firewall rules specifically targeting outbound network traffic from DigitalOcean + required: false + suboptions: + protocol: + type: str + choices: ['udp', 'tcp', 'icmp'] + default: tcp + description: + - Network protocol to be accepted. + required: false + ports: + type: str + description: + - The ports on which traffic will be allowed, single, range, or all + required: true + destinations: + type: dict + description: + - Dictionary of locations from which outbound traffic will be allowed + required: true + suboptions: + addresses: + type: list + elements: str + description: + - List of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic + required: false + droplet_ids: + type: list + elements: str + description: + - List of integers containing the IDs of the Droplets to which the firewall will allow traffic + required: false + load_balancer_uids: + type: list + elements: str + description: + - List of strings containing the IDs of the Load Balancers to which the firewall will allow traffic + required: false + tags: + type: list + elements: str + description: + - List of strings containing the names of Tags corresponding to groups of Droplets to + which the Firewall will allow traffic + required: false +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + +EXAMPLES = """ +# Allows tcp connections to port 22 (SSH) from specific sources +# Allows tcp connections to ports 80 and 443 from any source +# Allows outbound access to any destination for protocols tcp, udp and icmp +# The firewall rules will be applied to any droplets with the tag "sample" +- name: Create a Firewall named my-firewall + digital_ocean_firewall: + name: my-firewall + state: present + inbound_rules: + - protocol: "tcp" + ports: "22" + sources: + addresses: ["1.2.3.4"] + droplet_ids: ["my_droplet_id_1", "my_droplet_id_2"] + load_balancer_uids: ["my_lb_id_1", "my_lb_id_2"] + tags: ["tag_1", "tag_2"] + - protocol: "tcp" + ports: "80" + sources: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "tcp" + ports: "443" + sources: + addresses: ["0.0.0.0/0", "::/0"] + outbound_rules: + - protocol: "tcp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "udp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "icmp" + ports: "1-65535" + destinations: 
+ addresses: ["0.0.0.0/0", "::/0"] + droplet_ids: [] + tags: ["sample"] +""" + +RETURN = """ +data: + description: DigitalOcean firewall resource + returned: success + type: dict + sample: { + "created_at": "2020-08-11T18:41:30Z", + "droplet_ids": [], + "id": "7acd6ee2-257b-434f-8909-709a5816d4f9", + "inbound_rules": [ + { + "ports": "443", + "protocol": "tcp", + "sources": { + "addresses": [ + "1.2.3.4" + ], + "droplet_ids": [ + "my_droplet_id_1", + "my_droplet_id_2" + ], + "load_balancer_uids": [ + "my_lb_id_1", + "my_lb_id_2" + ], + "tags": [ + "tag_1", + "tag_2" + ] + } + }, + { + "sources": { + "addresses": [ + "0.0.0.0/0", + "::/0" + ] + }, + "ports": "80", + "protocol": "tcp" + }, + { + "sources": { + "addresses": [ + "0.0.0.0/0", + "::/0" + ] + }, + "ports": "443", + "protocol": "tcp" + } + ], + "name": "my-firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": [ + "0.0.0.0/0", + "::/0" + ] + }, + "ports": "1-65535", + "protocol": "tcp" + }, + { + "destinations": { + "addresses": [ + "0.0.0.0/0", + "::/0" + ] + }, + "ports": "1-65535", + "protocol": "udp" + }, + { + "destinations": { + "addresses": [ + "0.0.0.0/0", + "::/0" + ] + }, + "ports": "1-65535", + "protocol": "icmp" + } + ], + "pending_changes": [], + "status": "succeeded", + "tags": ["sample"] + } +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + +address_spec = dict( + addresses=dict(type="list", elements="str", required=False), + droplet_ids=dict(type="list", elements="str", required=False), + load_balancer_uids=dict(type="list", elements="str", required=False), + tags=dict(type="list", elements="str", required=False), +) + +inbound_spec = dict( + protocol=dict(type="str", choices=["udp", "tcp", "icmp"], default="tcp"), + ports=dict(type="str", required=True), + sources=dict(type="dict", required=True, options=address_spec), +) + +outbound_spec = dict( + protocol=dict(type="str", choices=["udp", "tcp", "icmp"], default="tcp"), + ports=dict(type="str", required=True), + destinations=dict(type="dict", required=True, options=address_spec), +) + + +class DOFirewall(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + self.name = self.module.params.get("name") + self.baseurl = "firewalls" + self.firewalls = self.get_firewalls() + + def get_firewalls(self): + base_url = self.baseurl + "?" 
+ response = self.rest.get("%s" % base_url) + status_code = response.status_code + status_code_success = 200 + + if status_code != status_code_success: + error = response.json + info = response.info + + if error: + error.update({"status_code": status_code}) + error.update({"status_code_success": status_code_success}) + self.module.fail_json(msg=error) + elif info: + info.update({"status_code_success": status_code_success}) + self.module.fail_json(msg=info) + else: + msg_error = "Failed to retrieve firewalls from DigitalOcean" + self.module.fail_json( + msg=msg_error + + " (url=" + + self.rest.baseurl + + "/" + + self.baseurl + + ", status=" + + str(status_code or "") + + " - expected:" + + str(status_code_success) + + ")" + ) + + return self.rest.get_paginated_data( + base_url=base_url, data_key_name="firewalls" + ) + + def get_firewall_by_name(self): + rule = {} + for firewall in self.firewalls: + if firewall["name"] == self.name: + rule.update(firewall) + return rule + return None + + def ordered(self, obj): + if isinstance(obj, dict): + return sorted((k, self.ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(self.ordered(x) for x in obj) + else: + return obj + + def fill_protocol_defaults(self, obj): + if obj.get("protocol") is None: + obj["protocol"] = "tcp" + + return obj + + def fill_source_and_destination_defaults_inner(self, obj): + addresses = obj.get("addresses") or [] + + droplet_ids = obj.get("droplet_ids") or [] + droplet_ids = [str(droplet_id) for droplet_id in droplet_ids] + + load_balancer_uids = obj.get("load_balancer_uids") or [] + load_balancer_uids = [str(uid) for uid in load_balancer_uids] + + tags = obj.get("tags") or [] + + data = { + "addresses": addresses, + "droplet_ids": droplet_ids, + "load_balancer_uids": load_balancer_uids, + "tags": tags, + } + + return data + + def fill_sources_and_destinations_defaults(self, obj, prop): + value = obj.get(prop) + + if value is None: + value = {} + else: + value = self.fill_source_and_destination_defaults_inner(value) + + obj[prop] = value + + return obj + + def fill_data_defaults(self, obj): + inbound_rules = obj.get("inbound_rules") + + if inbound_rules is None: + inbound_rules = [] + else: + inbound_rules = [self.fill_protocol_defaults(x) for x in inbound_rules] + inbound_rules = [ + self.fill_sources_and_destinations_defaults(x, "sources") + for x in inbound_rules + ] + + outbound_rules = obj.get("outbound_rules") + + if outbound_rules is None: + outbound_rules = [] + else: + outbound_rules = [self.fill_protocol_defaults(x) for x in outbound_rules] + outbound_rules = [ + self.fill_sources_and_destinations_defaults(x, "destinations") + for x in outbound_rules + ] + + droplet_ids = obj.get("droplet_ids") or [] + droplet_ids = [str(droplet_id) for droplet_id in droplet_ids] + + tags = obj.get("tags") or [] + + data = { + "name": obj.get("name"), + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + "droplet_ids": droplet_ids, + "tags": tags, + } + + return data + + def data_to_compare(self, obj): + return self.ordered(self.fill_data_defaults(obj)) + + def update(self, obj, id): + if id is None: + status_code_success = 202 + resp = self.rest.post(path=self.baseurl, data=obj) + else: + status_code_success = 200 + resp = self.rest.put(path=self.baseurl + "/" + id, data=obj) + status_code = resp.status_code + if status_code != status_code_success: + error = resp.json + error.update( + { + "context": "error when trying to " + + ("create" if (id is None) else "update") + + " 
firewalls" + } + ) + error.update({"status_code": status_code}) + error.update({"status_code_success": status_code_success}) + self.module.fail_json(msg=error) + self.module.exit_json(changed=True, data=resp.json["firewall"]) + + def create(self): + rule = self.get_firewall_by_name() + data = { + "name": self.module.params.get("name"), + "inbound_rules": self.module.params.get("inbound_rules"), + "outbound_rules": self.module.params.get("outbound_rules"), + "droplet_ids": self.module.params.get("droplet_ids"), + "tags": self.module.params.get("tags"), + } + if rule is None: + self.update(data, None) + else: + rule_data = { + "name": rule.get("name"), + "inbound_rules": rule.get("inbound_rules"), + "outbound_rules": rule.get("outbound_rules"), + "droplet_ids": rule.get("droplet_ids"), + "tags": rule.get("tags"), + } + + user_data = { + "name": data.get("name"), + "inbound_rules": data.get("inbound_rules"), + "outbound_rules": data.get("outbound_rules"), + "droplet_ids": data.get("droplet_ids"), + "tags": data.get("tags"), + } + + if self.data_to_compare(user_data) == self.data_to_compare(rule_data): + self.module.exit_json(changed=False, data=rule) + else: + self.update(data, rule.get("id")) + + def destroy(self): + rule = self.get_firewall_by_name() + if rule is None: + self.module.exit_json(changed=False, data="Firewall does not exist") + else: + endpoint = self.baseurl + "/" + rule["id"] + resp = self.rest.delete(path=endpoint) + status_code = resp.status_code + if status_code != 204: + self.module.fail_json(msg="Failed to delete firewall") + self.module.exit_json( + changed=True, + data="Deleted firewall rule: {0} - {1}".format( + rule["name"], rule["id"] + ), + ) + + +def core(module): + state = module.params.get("state") + firewall = DOFirewall(module) + + if state == "present": + firewall.create() + elif state == "absent": + firewall.destroy() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=True), + state=dict(type="str", choices=["present", "absent"], default="present"), + droplet_ids=dict(type="list", elements="str", required=False), + tags=dict(type="list", elements="str", required=False), + inbound_rules=dict( + type="list", elements="dict", options=inbound_spec, required=False + ), + outbound_rules=dict( + type="list", elements="dict", options=outbound_spec, required=False + ), + ), + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["inbound_rules", "outbound_rules"])], + ) + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_facts.py new file mode 100644 index 00000000..44239667 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_facts.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Anthony Bond <ajbond2005@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_firewall_info +short_description: Gather information about DigitalOcean firewalls 
+description: + - This module can be used to gather information about DigitalOcean firewalls. + - This module was called C(digital_ocean_firewall_facts) before Ansible 2.9. The usage did not change. +author: "Anthony Bond (@BondAnthony)" +options: + name: + description: + - Firewall rule name that can be used to identify and reference a specific firewall rule. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all firewalls + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about a specific firewall by name + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + name: "firewall_name" + +- name: Gather information from a firewall rule + community.digitalocean.digital_ocean_firewall_info: + name: SSH + register: resp_out + +- set_fact: + firewall_id: "{{ resp_out.data.id }}" + +- debug: + msg: "{{ firewall_id }}" +""" + + +RETURN = r""" +data: + description: DigitalOcean firewall information + returned: success + type: list + elements: dict + sample: [ + { + "id": "435tbg678-1db53-32b6-t543-28322569t252", + "name": "metrics", + "status": "succeeded", + "inbound_rules": [ + { + "protocol": "tcp", + "ports": "9100", + "sources": { + "addresses": [ + "1.1.1.1" + ] + } + } + ], + "outbound_rules": [], + "created_at": "2018-01-15T07:04:25Z", + "droplet_ids": [ + 87426985 + ], + "tags": [], + "pending_changes": [] + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + firewall_name = module.params.get("name", None) + rest = DigitalOceanHelper(module) + base_url = "firewalls?" 
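+    # The initial GET only verifies the endpoint is reachable; the full listing is then fetched page by page below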
+ + response = rest.get("%s" % base_url) + status_code = response.status_code + if status_code != 200: + module.fail_json(msg="Failed to retrieve firewalls from Digital Ocean") + firewalls = rest.get_paginated_data(base_url=base_url, data_key_name="firewalls") + + if firewall_name is not None: + rule = {} + for firewall in firewalls: + if firewall["name"] == firewall_name: + rule.update(firewall) + firewalls = [rule] + module.exit_json(changed=False, data=firewalls) + else: + module.exit_json(changed=False, data=firewalls) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_firewall_facts", + "community.digitalocean.digital_ocean_firewall_facts", + ): + module.deprecate( + "The 'digital_ocean_firewall_facts' module has been renamed to 'digital_ocean_firewall_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_info.py new file mode 100644 index 00000000..44239667 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_firewall_info.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Anthony Bond <ajbond2005@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_firewall_info +short_description: Gather information about DigitalOcean firewalls +description: + - This module can be used to gather information about DigitalOcean firewalls. + - This module was called C(digital_ocean_firewall_facts) before Ansible 2.9. The usage did not change. +author: "Anthony Bond (@BondAnthony)" +options: + name: + description: + - Firewall rule name that can be used to identify and reference a specific firewall rule. 
+ required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all firewalls + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about a specific firewall by name + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + name: "firewall_name" + +- name: Gather information from a firewall rule + community.digitalocean.digital_ocean_firewall_info: + name: SSH + register: resp_out + +- set_fact: + firewall_id: "{{ resp_out.data.id }}" + +- debug: + msg: "{{ firewall_id }}" +""" + + +RETURN = r""" +data: + description: DigitalOcean firewall information + returned: success + type: list + elements: dict + sample: [ + { + "id": "435tbg678-1db53-32b6-t543-28322569t252", + "name": "metrics", + "status": "succeeded", + "inbound_rules": [ + { + "protocol": "tcp", + "ports": "9100", + "sources": { + "addresses": [ + "1.1.1.1" + ] + } + } + ], + "outbound_rules": [], + "created_at": "2018-01-15T07:04:25Z", + "droplet_ids": [ + 87426985 + ], + "tags": [], + "pending_changes": [] + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + firewall_name = module.params.get("name", None) + rest = DigitalOceanHelper(module) + base_url = "firewalls?" + + response = rest.get("%s" % base_url) + status_code = response.status_code + if status_code != 200: + module.fail_json(msg="Failed to retrieve firewalls from Digital Ocean") + firewalls = rest.get_paginated_data(base_url=base_url, data_key_name="firewalls") + + if firewall_name is not None: + rule = {} + for firewall in firewalls: + if firewall["name"] == firewall_name: + rule.update(firewall) + firewalls = [rule] + module.exit_json(changed=False, data=firewalls) + else: + module.exit_json(changed=False, data=firewalls) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_firewall_facts", + "community.digitalocean.digital_ocean_firewall_facts", + ): + module.deprecate( + "The 'digital_ocean_firewall_facts' module has been renamed to 'digital_ocean_firewall_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip.py new file mode 100644 index 00000000..d4d6ff26 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip.py @@ -0,0 +1,519 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Patrick F. 
Marques <patrickfmarques@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_floating_ip +short_description: Manage DigitalOcean Floating IPs +description: + - Create/delete/assign a floating IP. +author: + - "Patrick Marques (@pmarques)" + - "Daniel George (@danxg87)" +options: + state: + description: + - Indicate desired state of the target. + - If C(state=present) Create (and optionally attach) floating IP + - If C(state=absent) Delete floating IP + - If C(state=attached) attach floating IP to a droplet + - If C(state=detached) detach floating IP from a droplet + default: present + choices: ['present', 'absent', 'attached', 'detached'] + type: str + ip: + description: + - Public IP address of the Floating IP. Used to remove an IP + type: str + aliases: ['id'] + region: + description: + - The region that the Floating IP is reserved to. + type: str + droplet_id: + description: + - The Droplet that the Floating IP has been assigned to. + type: str + oauth_token: + description: + - DigitalOcean OAuth token. + required: true + type: str + timeout: + description: + - Floating IP creation timeout. + type: int + default: 30 + validate_certs: + description: + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates. + type: bool + default: true + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when creating. + type: str + required: false + default: "" +notes: + - Version 2 of DigitalOcean API is used. 
+requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: "Create a Floating IP in region lon1" + community.digitalocean.digital_ocean_floating_ip: + state: present + region: lon1 + +- name: Create a Floating IP in region lon1 (and assign to Project "test") + community.digitalocean.digital_ocean_floating_ip: + state: present + region: lon1 + project: test + +- name: "Create a Floating IP assigned to Droplet ID 123456" + community.digitalocean.digital_ocean_floating_ip: + state: present + droplet_id: 123456 + +- name: "Attach an existing Floating IP of 1.2.3.4 to Droplet ID 123456" + community.digitalocean.digital_ocean_floating_ip: + state: attached + ip: "1.2.3.4" + droplet_id: 123456 + +- name: "Detach an existing Floating IP of 1.2.3.4 from its Droplet" + community.digitalocean.digital_ocean_floating_ip: + state: detached + ip: "1.2.3.4" + +- name: "Delete a Floating IP with ip 1.2.3.4" + community.digitalocean.digital_ocean_floating_ip: + state: absent + ip: "1.2.3.4" + +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs +data: + description: a DigitalOcean Floating IP resource + returned: success and no resource constraint + type: dict + sample: + action: + id: 68212728 + status: in-progress + type: assign_ip + started_at: '2015-10-15T17:45:44Z' + completed_at: null + resource_id: 758603823 + resource_type: floating_ip + region: + name: New York 3 + slug: nyc3 + sizes: + - 512mb, + - 1gb, + - 2gb, + - 4gb, + - 8gb, + - 16gb, + - 32gb, + - 48gb, + - 64gb + features: + - private_networking + - backups + - ipv6 + - metadata + available: true + region_slug: nyc3 +msg: + description: Informational or error message encountered during execution + returned: changed + type: str + sample: No project named test2 found +assign_status: + description: Assignment status (ok, not_found, assigned, already_assigned, service_down) + returned: changed + type: str + sample: assigned +resources: + description: Resource assignment involved in project assignment + returned: changed + type: dict + sample: + assigned_at: '2021-10-25T17:39:38Z' + links: + self: https://api.digitalocean.com/v2/floating_ips/157.230.64.107 + status: assigned + urn: do:floatingip:157.230.64.107 +""" + +import json +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import fetch_url + +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, + DigitalOceanProjects, +) + + +class Response(object): + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(self.info["body"]) + return None + try: + return json.loads(self.body) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + +class Rest(object): + def __init__(self, module, headers): + self.module = module + self.headers = headers + self.baseurl = "https://api.digitalocean.com/v2" + + def _url_builder(self, path): + if path[0] == "/": + path = path[1:] + return "%s/%s" % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + timeout = self.module.params["timeout"] + + resp, info = fetch_url( + self.module, + url, + data=data, + headers=self.headers, + 
method=method, + timeout=timeout, + ) + + # Exceptions in fetch_url may result in a status -1, the ensures a + if info["status"] == -1: + self.module.fail_json(msg=info["msg"]) + + return Response(resp, info) + + def get(self, path, data=None, headers=None): + return self.send("GET", path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send("PUT", path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send("POST", path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send("DELETE", path, data, headers) + + +def wait_action(module, rest, ip, action_id, timeout=60): + end_time = time.monotonic() + timeout + while time.monotonic() < end_time: + response = rest.get("floating_ips/{0}/actions/{1}".format(ip, action_id)) + json_data = response.json + status_code = response.status_code + status = response.json["action"]["status"] + if status_code == 200: + if status == "completed": + return json_data + elif status == "errored": + module.fail_json( + msg="Floating ip action error [ip: {0}: action: {1}]".format( + ip, action_id + ), + data=json, + ) + time.sleep(10) + module.fail_json( + msg="Floating ip action timeout [ip: {0}: action: {1}]".format(ip, action_id), + data=json, + ) + + +def core(module): + api_token = module.params["oauth_token"] + state = module.params["state"] + ip = module.params["ip"] + droplet_id = module.params["droplet_id"] + + rest = Rest( + module, + { + "Authorization": "Bearer {0}".format(api_token), + "Content-type": "application/json", + }, + ) + + if state in ("present"): + if droplet_id is not None and module.params["ip"] is not None: + # Lets try to associate the ip to the specified droplet + associate_floating_ips(module, rest) + else: + create_floating_ips(module, rest) + + elif state in ("attached"): + if droplet_id is not None and module.params["ip"] is not None: + associate_floating_ips(module, rest) + + elif state in ("detached"): + if module.params["ip"] is not None: + detach_floating_ips(module, rest, module.params["ip"]) + + elif state in ("absent"): + response = rest.delete("floating_ips/{0}".format(ip)) + status_code = response.status_code + json_data = response.json + if status_code == 204: + module.exit_json(changed=True) + elif status_code == 404: + module.exit_json(changed=False) + else: + module.exit_json(changed=False, data=json_data) + + +def get_floating_ip_details(module, rest): + ip = module.params["ip"] + + response = rest.get("floating_ips/{0}".format(ip)) + status_code = response.status_code + json_data = response.json + if status_code == 200: + return json_data["floating_ip"] + else: + module.fail_json( + msg="Error assigning floating ip [{0}: {1}]".format( + status_code, json_data["message"] + ), + region=module.params["region"], + ) + + +def assign_floating_id_to_droplet(module, rest): + ip = module.params["ip"] + + payload = { + "type": "assign", + "droplet_id": module.params["droplet_id"], + } + + response = rest.post("floating_ips/{0}/actions".format(ip), data=payload) + status_code = response.status_code + json_data = response.json + if status_code == 201: + json_data = wait_action(module, rest, ip, json_data["action"]["id"]) + + module.exit_json(changed=True, data=json_data) + else: + module.fail_json( + msg="Error creating floating ip [{0}: {1}]".format( + status_code, json_data["message"] + ), + region=module.params["region"], + ) + + +def detach_floating_ips(module, rest, ip): + payload = {"type": "unassign"} + response = 
rest.post("floating_ips/{0}/actions".format(ip), data=payload) + status_code = response.status_code + json_data = response.json + + if status_code == 201: + json_data = wait_action(module, rest, ip, json_data["action"]["id"]) + module.exit_json( + changed=True, msg="Detached floating ip {0}".format(ip), data=json_data + ) + action = json_data.get("action", None) + action_id = action.get("id", None) + if action is None: + module.fail_json( + changed=False, + msg="Error retrieving detach action. Got: {0}".format(action), + ) + if action_id is None: + module.fail_json( + changed=False, + msg="Error retrieving detach action ID. Got: {0}".format(action_id), + ) + else: + module.fail_json( + changed=False, + msg="Error detaching floating ip [{0}: {1}]".format( + status_code, json_data["message"] + ), + ) + + +def associate_floating_ips(module, rest): + floating_ip = get_floating_ip_details(module, rest) + droplet = floating_ip["droplet"] + + # TODO: If already assigned to a droplet verify if is one of the specified as valid + if droplet is not None and str(droplet["id"]) in [module.params["droplet_id"]]: + module.exit_json(changed=False) + else: + assign_floating_id_to_droplet(module, rest) + + +def create_floating_ips(module, rest): + payload = {} + + if module.params["region"] is not None: + payload["region"] = module.params["region"] + if module.params["droplet_id"] is not None: + payload["droplet_id"] = module.params["droplet_id"] + + # Get existing floating IPs + response = rest.get("floating_ips/") + status_code = response.status_code + json_data = response.json + + # Exit unchanged if any of them are assigned to this Droplet already + if status_code == 200: + floating_ips = json_data.get("floating_ips", []) + if len(floating_ips) != 0: + for floating_ip in floating_ips: + droplet = floating_ip.get("droplet", None) + if droplet is not None: + droplet_id = droplet.get("id", None) + if droplet_id is not None: + if str(droplet_id) == module.params["droplet_id"]: + ip = floating_ip.get("ip", None) + if ip is not None: + module.exit_json( + changed=False, data={"floating_ip": floating_ip} + ) + else: + module.fail_json( + changed=False, + msg="Unexpected error querying floating ip", + ) + + response = rest.post("floating_ips", data=payload) + status_code = response.status_code + json_data = response.json + if status_code == 202: + if module.params.get( + "project" + ): # only load for non-default project assignments + rest = DigitalOceanHelper(module) + projects = DigitalOceanProjects(module, rest) + project_name = module.params.get("project") + if ( + project_name + ): # empty string is the default project, skip project assignment + floating_ip = json_data.get("floating_ip") + ip = floating_ip.get("ip") + if ip: + urn = "do:floatingip:{0}".format(ip) + ( + assign_status, + error_message, + resources, + ) = projects.assign_to_project(project_name, urn) + module.exit_json( + changed=True, + data=json_data, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + else: + module.exit_json( + changed=True, + msg="Floating IP created but not assigned to the {0} Project (missing information from the API response)".format( + project_name + ), + data=json_data, + ) + else: + module.exit_json(changed=True, data=json_data) + else: + module.exit_json(changed=True, data=json_data) + else: + module.fail_json( + msg="Error creating floating ip [{0}: {1}]".format( + status_code, json_data["message"] + ), + region=module.params["region"], + ) + + +def main(): + module = AnsibleModule( + 
argument_spec=dict( + state=dict( + choices=["present", "absent", "attached", "detached"], default="present" + ), + ip=dict(aliases=["id"], required=False), + region=dict(required=False), + droplet_id=dict(required=False), + oauth_token=dict( + no_log=True, + # Support environment variable for DigitalOcean OAuth Token + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + validate_certs=dict(type="bool", default=True), + timeout=dict(type="int", default=30), + project_name=dict( + type="str", aliases=["project"], required=False, default="" + ), + ), + required_if=[ + ("state", "delete", ["ip"]), + ("state", "attached", ["ip", "droplet_id"]), + ("state", "detached", ["ip"]), + ], + mutually_exclusive=[["region", "droplet_id"]], + ) + + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_facts.py new file mode 100644 index 00000000..3d232a4a --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_facts.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (C) 2017-18, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_floating_ip_info +short_description: DigitalOcean Floating IPs information +description: + - This module can be used to fetch DigitalOcean Floating IPs information. + - This module was called C(digital_ocean_floating_ip_facts) before Ansible 2.9. The usage did not change. +author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. 
+requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: "Gather information about all Floating IPs" + community.digitalocean.digital_ocean_floating_ip_info: + register: result + +- name: "List of current floating ips" + debug: + var: result.floating_ips +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs +floating_ips: + description: a DigitalOcean Floating IP resource + returned: success and no resource constraint + type: list + sample: [ + { + "ip": "45.55.96.47", + "droplet": null, + "region": { + "name": "New York 3", + "slug": "nyc3", + "sizes": [ + "512mb", + "1gb", + "2gb", + "4gb", + "8gb", + "16gb", + "32gb", + "48gb", + "64gb" + ], + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata" + ], + "available": true + }, + "locked": false + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + page = 1 + has_next = True + floating_ips = [] + status_code = None + while has_next or status_code != 200: + response = rest.get("floating_ips?page={0}&per_page=20".format(page)) + status_code = response.status_code + # stop if any error during pagination + if status_code != 200: + break + page += 1 + floating_ips.extend(response.json["floating_ips"]) + has_next = ( + "pages" in response.json["links"] + and "next" in response.json["links"]["pages"] + ) + + if status_code == 200: + module.exit_json(changed=False, floating_ips=floating_ips) + else: + module.fail_json( + msg="Error fetching information [{0}: {1}]".format( + status_code, response.json["message"] + ) + ) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(), + supports_check_mode=True, + ) + if module._name in ( + "digital_ocean_floating_ip_facts", + "community.digitalocean.digital_ocean_floating_ip_facts", + ): + module.deprecate( + "The 'digital_ocean_floating_ip_facts' module has been renamed to 'digital_ocean_floating_ip_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_info.py new file mode 100644 index 00000000..3d232a4a --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_floating_ip_info.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (C) 2017-18, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_floating_ip_info +short_description: DigitalOcean Floating IPs information +description: + - This module can be used to fetch DigitalOcean Floating IPs information. + - This module was called C(digital_ocean_floating_ip_facts) before Ansible 2.9. The usage did not change. 
+author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: "Gather information about all Floating IPs" + community.digitalocean.digital_ocean_floating_ip_info: + register: result + +- name: "List of current floating ips" + debug: + var: result.floating_ips +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs +floating_ips: + description: a DigitalOcean Floating IP resource + returned: success and no resource constraint + type: list + sample: [ + { + "ip": "45.55.96.47", + "droplet": null, + "region": { + "name": "New York 3", + "slug": "nyc3", + "sizes": [ + "512mb", + "1gb", + "2gb", + "4gb", + "8gb", + "16gb", + "32gb", + "48gb", + "64gb" + ], + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata" + ], + "available": true + }, + "locked": false + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + page = 1 + has_next = True + floating_ips = [] + status_code = None + while has_next or status_code != 200: + response = rest.get("floating_ips?page={0}&per_page=20".format(page)) + status_code = response.status_code + # stop if any error during pagination + if status_code != 200: + break + page += 1 + floating_ips.extend(response.json["floating_ips"]) + has_next = ( + "pages" in response.json["links"] + and "next" in response.json["links"]["pages"] + ) + + if status_code == 200: + module.exit_json(changed=False, floating_ips=floating_ips) + else: + module.fail_json( + msg="Error fetching information [{0}: {1}]".format( + status_code, response.json["message"] + ) + ) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(), + supports_check_mode=True, + ) + if module._name in ( + "digital_ocean_floating_ip_facts", + "community.digitalocean.digital_ocean_floating_ip_facts", + ): + module.deprecate( + "The 'digital_ocean_floating_ip_facts' module has been renamed to 'digital_ocean_floating_ip_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_facts.py new file mode 100644 index 00000000..7feb3af0 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_facts.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_image_info +short_description: Gather information about DigitalOcean images +description: + - This module can be used to gather information about DigitalOcean provided images. 
+ - These images can be either of type C(distribution), C(application) and C(private). + - This module was called C(digital_ocean_image_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + image_type: + description: + - Specifies the type of image information to be retrieved. + - If set to C(application), then information are gathered related to all application images. + - If set to C(distribution), then information are gathered related to all distribution images. + - If set to C(private), then information are gathered related to all private images. + - If not set to any of above, then information are gathered related to all images. + default: 'all' + choices: [ 'all', 'application', 'distribution', 'private' ] + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all images + community.digitalocean.digital_ocean_image_info: + image_type: all + oauth_token: "{{ oauth_token }}" + +- name: Gather information about application images + community.digitalocean.digital_ocean_image_info: + image_type: application + oauth_token: "{{ oauth_token }}" + +- name: Gather information about distribution images + community.digitalocean.digital_ocean_image_info: + image_type: distribution + oauth_token: "{{ oauth_token }}" + +- name: Get distribution about image with slug coreos-beta + community.digitalocean.digital_ocean_image_info: + register: resp_out +- set_fact: + distribution_name: "{{ item.distribution }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?slug=='coreos-beta']" +- debug: + var: distribution_name + +""" + + +RETURN = r""" +data: + description: DigitalOcean image information + returned: success + type: list + sample: [ + { + "created_at": "2018-02-02T07:11:43Z", + "distribution": "CoreOS", + "id": 31434061, + "min_disk_size": 20, + "name": "1662.1.0 (beta)", + "public": true, + "regions": [ + "nyc1", + "sfo1", + "nyc2", + "ams2", + "sgp1", + "lon1", + "nyc3", + "ams3", + "fra1", + "tor1", + "sfo2", + "blr1" + ], + "size_gigabytes": 0.42, + "slug": "coreos-beta", + "type": "snapshot" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + image_type = module.params["image_type"] + + rest = DigitalOceanHelper(module) + + base_url = "images?" 
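+    # The image_type parameter selects the API filter appended below:
+    # "type=distribution", "type=application", or "private=true"; with
+    # image_type "all" no filter is added and every image is returned.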
+ if image_type == "distribution": + base_url += "type=distribution&" + elif image_type == "application": + base_url += "type=application&" + elif image_type == "private": + base_url += "private=true&" + + images = rest.get_paginated_data(base_url=base_url, data_key_name="images") + + module.exit_json(changed=False, data=images) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + image_type=dict( + type="str", + required=False, + choices=["all", "application", "distribution", "private"], + default="all", + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_image_facts", + "community.digitalocean.digital_ocean_image_facts", + ): + module.deprecate( + "The 'digital_ocean_image_facts' module has been renamed to 'digital_ocean_image_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_info.py new file mode 100644 index 00000000..7feb3af0 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_image_info.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_image_info +short_description: Gather information about DigitalOcean images +description: + - This module can be used to gather information about DigitalOcean provided images. + - These images can be either of type C(distribution), C(application) and C(private). + - This module was called C(digital_ocean_image_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + image_type: + description: + - Specifies the type of image information to be retrieved. + - If set to C(application), then information are gathered related to all application images. + - If set to C(distribution), then information are gathered related to all distribution images. + - If set to C(private), then information are gathered related to all private images. + - If not set to any of above, then information are gathered related to all images. 
+ default: 'all' + choices: [ 'all', 'application', 'distribution', 'private' ] + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all images + community.digitalocean.digital_ocean_image_info: + image_type: all + oauth_token: "{{ oauth_token }}" + +- name: Gather information about application images + community.digitalocean.digital_ocean_image_info: + image_type: application + oauth_token: "{{ oauth_token }}" + +- name: Gather information about distribution images + community.digitalocean.digital_ocean_image_info: + image_type: distribution + oauth_token: "{{ oauth_token }}" + +- name: Get distribution about image with slug coreos-beta + community.digitalocean.digital_ocean_image_info: + register: resp_out +- set_fact: + distribution_name: "{{ item.distribution }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?slug=='coreos-beta']" +- debug: + var: distribution_name + +""" + + +RETURN = r""" +data: + description: DigitalOcean image information + returned: success + type: list + sample: [ + { + "created_at": "2018-02-02T07:11:43Z", + "distribution": "CoreOS", + "id": 31434061, + "min_disk_size": 20, + "name": "1662.1.0 (beta)", + "public": true, + "regions": [ + "nyc1", + "sfo1", + "nyc2", + "ams2", + "sgp1", + "lon1", + "nyc3", + "ams3", + "fra1", + "tor1", + "sfo2", + "blr1" + ], + "size_gigabytes": 0.42, + "slug": "coreos-beta", + "type": "snapshot" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + image_type = module.params["image_type"] + + rest = DigitalOceanHelper(module) + + base_url = "images?" 
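+    # base_url keeps a trailing "?"/"&" so the shared pagination helper can
+    # append its own paging parameters (page/per_page) while walking the
+    # result pages.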
+ if image_type == "distribution": + base_url += "type=distribution&" + elif image_type == "application": + base_url += "type=application&" + elif image_type == "private": + base_url += "private=true&" + + images = rest.get_paginated_data(base_url=base_url, data_key_name="images") + + module.exit_json(changed=False, data=images) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + image_type=dict( + type="str", + required=False, + choices=["all", "application", "distribution", "private"], + default="all", + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_image_facts", + "community.digitalocean.digital_ocean_image_facts", + ): + module.deprecate( + "The 'digital_ocean_image_facts' module has been renamed to 'digital_ocean_image_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes.py new file mode 100644 index 00000000..eda9c424 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes.py @@ -0,0 +1,493 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_kubernetes +short_description: Create and delete a DigitalOcean Kubernetes cluster +description: + - Create and delete a Kubernetes cluster in DigitalOcean (and optionally wait for it to be running). +version_added: 1.3.0 +author: Mark Mercado (@mamercad) +options: + oauth_token: + description: + - DigitalOcean OAuth token; can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + type: str + aliases: ['API_TOKEN'] + required: true + state: + description: + - The usual, C(present) to create, C(absent) to destroy + type: str + choices: ['present', 'absent'] + default: present + name: + description: + - A human-readable name for a Kubernetes cluster. + type: str + required: true + region: + description: + - The slug identifier for the region where the Kubernetes cluster will be created. + type: str + aliases: ['region_id'] + default: nyc1 + version: + description: + - The slug identifier for the version of Kubernetes used for the cluster. See the /v2/kubernetes/options endpoint for available versions. + type: str + required: false + default: latest + auto_upgrade: + description: + - A boolean value indicating whether the cluster will be automatically upgraded to new patch releases during its maintenance window. + type: bool + required: false + default: false + surge_upgrade: + description: + - A boolean value indicating whether surge upgrade is enabled/disabled for the cluster. + - Surge upgrade makes cluster upgrades fast and reliable by bringing up new nodes before destroying the outdated nodes. + type: bool + required: false + default: false + tags: + description: + - A flat array of tag names as strings to be applied to the Kubernetes cluster. 
+ - All clusters will be automatically tagged "k8s" and "k8s:$K8S_CLUSTER_ID" in addition to any tags provided by the user. + required: false + type: list + elements: str + maintenance_policy: + description: + - An object specifying the maintenance window policy for the Kubernetes cluster (see table below). + type: dict + required: false + node_pools: + description: + - An object specifying the details of the worker nodes available to the Kubernetes cluster (see table below). + type: list + elements: dict + suboptions: + name: + type: str + description: A human-readable name for the node pool. + size: + type: str + description: The slug identifier for the type of Droplet used as workers in the node pool. + count: + type: int + description: The number of Droplet instances in the node pool. + tags: + type: list + elements: str + description: + - An array containing the tags applied to the node pool. + - All node pools are automatically tagged C("k8s"), C("k8s-worker"), and C("k8s:$K8S_CLUSTER_ID"). + labels: + type: dict + description: An object containing a set of Kubernetes labels. The keys are user-defined. + taints: + type: list + elements: dict + description: + - An array of taints to apply to all nodes in a pool. + - Taints will automatically be applied to all existing nodes and any subsequent nodes added to the pool. + - When a taint is removed, it is removed from all nodes in the pool. + auto_scale: + type: bool + description: + - A boolean value indicating whether auto-scaling is enabled for this node pool. + min_nodes: + type: int + description: + - The minimum number of nodes that this node pool can be auto-scaled to. + - The value will be C(0) if C(auto_scale) is set to C(false). + max_nodes: + type: int + description: + - The maximum number of nodes that this node pool can be auto-scaled to. + - The value will be C(0) if C(auto_scale) is set to C(false). + default: + - name: worker-pool + size: s-1vcpu-2gb + count: 1 + tags: [] + labels: {} + taints: [] + auto_scale: false + min_nodes: 0 + max_nodes: 0 + vpc_uuid: + description: + - A string specifying the UUID of the VPC to which the Kubernetes cluster will be assigned. + - If excluded, the cluster will be assigned to your account's default VPC for the region. + type: str + required: false + return_kubeconfig: + description: + - Controls whether or not to return the C(kubeconfig). + type: bool + required: false + default: false + wait: + description: + - Wait for the cluster to be running before returning. + type: bool + required: false + default: true + wait_timeout: + description: + - How long before wait gives up, in seconds, when creating a cluster. + type: int + default: 600 + ha: + description: + - A boolean value indicating whether the control plane is run in a highly available configuration in the cluster. + - Highly available control planes incur less downtime. 
+ type: bool + default: false +""" + + +EXAMPLES = r""" +- name: Create a new DigitalOcean Kubernetes cluster in New York 1 + community.digitalocean.digital_ocean_kubernetes: + state: present + oauth_token: "{{ lookup('env', 'DO_API_TOKEN') }}" + name: hacktoberfest + region: nyc1 + node_pools: + - name: hacktoberfest-workers + size: s-1vcpu-2gb + count: 3 + return_kubeconfig: yes + wait_timeout: 600 + register: my_cluster + +- name: Show the kubeconfig for the cluster we just created + debug: + msg: "{{ my_cluster.data.kubeconfig }}" + +- name: Destroy (delete) an existing DigitalOcean Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes: + state: absent + oauth_token: "{{ lookup('env', 'DO_API_TOKEN') }}" + name: hacktoberfest +""" + + +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Kubernetes +# The only variance from the documented response is that the kubeconfig is (if return_kubeconfig is True) merged in at data['kubeconfig'] +RETURN = r""" +data: + description: A DigitalOcean Kubernetes cluster (and optional C(kubeconfig)) + returned: changed + type: dict + sample: + kubeconfig: |- + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: REDACTED + server: https://REDACTED.k8s.ondigitalocean.com + name: do-nyc1-hacktoberfest + contexts: + - context: + cluster: do-nyc1-hacktoberfest + user: do-nyc1-hacktoberfest-admin + name: do-nyc1-hacktoberfest + current-context: do-nyc1-hacktoberfest + kind: Config + preferences: {} + users: + - name: do-nyc1-hacktoberfest-admin + user: + token: REDACTED + kubernetes_cluster: + auto_upgrade: false + cluster_subnet: 10.244.0.0/16 + created_at: '2020-09-27T00:55:37Z' + endpoint: https://REDACTED.k8s.ondigitalocean.com + id: REDACTED + ipv4: REDACTED + maintenance_policy: + day: any + duration: 4h0m0s + start_time: '15:00' + name: hacktoberfest + node_pools: + - auto_scale: false + count: 1 + id: REDACTED + labels: null + max_nodes: 0 + min_nodes: 0 + name: hacktoberfest-workers + nodes: + - created_at: '2020-09-27T00:55:37Z' + droplet_id: '209555245' + id: REDACTED + name: hacktoberfest-workers-3tdq1 + status: + state: running + updated_at: '2020-09-27T00:58:36Z' + size: s-1vcpu-2gb + tags: + - k8s + - k8s:REDACTED + - k8s:worker + taints: [] + region: nyc1 + service_subnet: 10.245.0.0/16 + status: + state: running + surge_upgrade: false + tags: + - k8s + - k8s:REDACTED + updated_at: '2020-09-27T01:00:37Z' + version: 1.18.8-do.1 + vpc_uuid: REDACTED +""" + + +import traceback +import time +import json +from traceback import format_exc +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOKubernetes(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # Pop these values so we don't include them in the POST data + self.return_kubeconfig = self.module.params.pop("return_kubeconfig", False) + self.wait = self.module.params.pop("wait", True) + self.wait_timeout = self.module.params.pop("wait_timeout", 600) + self.module.params.pop("oauth_token") + self.cluster_id = None + + def get_by_id(self): + """Returns an existing DigitalOcean Kubernetes cluster matching on id""" + response = self.rest.get("kubernetes/clusters/{0}".format(self.cluster_id)) + json_data = response.json + if response.status_code == 200: + return json_data + return None + 
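+    # The helpers below resolve a cluster by name: get_all_clusters() lists
+    # every cluster, get_by_name() matches on the "name" field, and
+    # get_kubernetes() caches the matching cluster's id in self.cluster_id
+    # for the later by-id and kubeconfig requests.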
+ def get_all_clusters(self): + """Returns all DigitalOcean Kubernetes clusters""" + response = self.rest.get("kubernetes/clusters") + json_data = response.json + if response.status_code == 200: + return json_data + return None + + def get_by_name(self, cluster_name): + """Returns an existing DigitalOcean Kubernetes cluster matching on name""" + if not cluster_name: + return None + clusters = self.get_all_clusters() + for cluster in clusters["kubernetes_clusters"]: + if cluster["name"] == cluster_name: + return cluster + return None + + def get_kubernetes_kubeconfig(self): + """Returns the kubeconfig for an existing DigitalOcean Kubernetes cluster""" + response = self.rest.get( + "kubernetes/clusters/{0}/kubeconfig".format(self.cluster_id) + ) + if response.status_code == 200: + return response.body + else: + self.module.fail_json(msg="Failed to retrieve kubeconfig") + + def get_kubernetes(self): + """Returns an existing DigitalOcean Kubernetes cluster by name""" + json_data = self.get_by_name(self.module.params["name"]) + if json_data: + self.cluster_id = json_data["id"] + return json_data + else: + return None + + def get_kubernetes_options(self): + """Fetches DigitalOcean Kubernetes options: regions, sizes, versions. + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/list_kubernetes_options + """ + response = self.rest.get("kubernetes/options") + json_data = response.json + if response.status_code == 200: + return json_data + return None + + def ensure_running(self): + """Waits for the newly created DigitalOcean Kubernetes cluster to be running""" + end_time = time.monotonic() + self.wait_timeout + while time.monotonic() < end_time: + cluster = self.get_by_id() + if cluster["kubernetes_cluster"]["status"]["state"] == "running": + return cluster + time.sleep(10) + self.module.fail_json(msg="Wait for Kubernetes cluster to be running") + + def create(self): + """Creates a DigitalOcean Kubernetes cluster + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/create_kubernetes_cluster + """ + # Get valid Kubernetes options (regions, sizes, versions) + kubernetes_options = self.get_kubernetes_options()["options"] + # Validate region + valid_regions = [str(x["slug"]) for x in kubernetes_options["regions"]] + if self.module.params.get("region") not in valid_regions: + self.module.fail_json( + msg="Invalid region {0} (valid regions are {1})".format( + self.module.params.get("region"), ", ".join(valid_regions) + ) + ) + # Validate version + valid_versions = [str(x["slug"]) for x in kubernetes_options["versions"]] + valid_versions.append("latest") + if self.module.params.get("version") not in valid_versions: + self.module.fail_json( + msg="Invalid version {0} (valid versions are {1})".format( + self.module.params.get("version"), ", ".join(valid_versions) + ) + ) + # Validate size + valid_sizes = [str(x["slug"]) for x in kubernetes_options["sizes"]] + for node_pool in self.module.params.get("node_pools"): + if node_pool["size"] not in valid_sizes: + self.module.fail_json( + msg="Invalid size {0} (valid sizes are {1})".format( + node_pool["size"], ", ".join(valid_sizes) + ) + ) + + # Create the Kubernetes cluster + json_data = self.get_kubernetes() + if json_data: + # Add the kubeconfig to the return + if self.return_kubeconfig: + json_data["kubeconfig"] = self.get_kubernetes_kubeconfig() + self.module.exit_json(changed=False, data=json_data) + if self.module.check_mode: + self.module.exit_json(changed=True) + request_params = 
dict(self.module.params) + response = self.rest.post("kubernetes/clusters", data=request_params) + json_data = response.json + if response.status_code >= 400: + self.module.fail_json(changed=False, msg=json_data) + # Set the cluster_id + self.cluster_id = json_data["kubernetes_cluster"]["id"] + if self.wait: + json_data = self.ensure_running() + # Add the kubeconfig to the return + if self.return_kubeconfig: + json_data["kubeconfig"] = self.get_kubernetes_kubeconfig() + self.module.exit_json(changed=True, data=json_data["kubernetes_cluster"]) + + def delete(self): + """Deletes a DigitalOcean Kubernetes cluster + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/delete_kubernetes_cluster + """ + json_data = self.get_kubernetes() + if json_data: + if self.module.check_mode: + self.module.exit_json(changed=True) + response = self.rest.delete( + "kubernetes/clusters/{0}".format(json_data["id"]) + ) + if response.status_code == 204: + self.module.exit_json( + changed=True, data=json_data, msg="Kubernetes cluster deleted" + ) + self.module.fail_json( + changed=False, msg="Failed to delete Kubernetes cluster" + ) + json_data = response.json + else: + self.module.exit_json(changed=False, msg="Kubernetes cluster not found") + + +def run(module): + state = module.params.pop("state") + cluster = DOKubernetes(module) + if state == "present": + cluster.create() + elif state == "absent": + cluster.delete() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=["present", "absent"], default="present"), + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + name=dict(type="str", required=True), + region=dict(aliases=["region_id"], default="nyc1"), + version=dict(type="str", default="latest"), + auto_upgrade=dict(type="bool", default=False), + surge_upgrade=dict(type="bool", default=False), + tags=dict(type="list", elements="str"), + maintenance_policy=dict(type="dict"), + node_pools=dict( + type="list", + elements="dict", + default=[ + { + "name": "worker-pool", + "size": "s-1vcpu-2gb", + "count": 1, + "tags": [], + "labels": {}, + "taints": [], + "auto_scale": False, + "min_nodes": 0, + "max_nodes": 0, + } + ], + ), + vpc_uuid=dict(type="str"), + return_kubeconfig=dict(type="bool", default=False), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + ha=dict(type="bool", default=False), + ), + required_if=( + [ + ("state", "present", ["name", "region", "version", "node_pools"]), + ] + ), + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes_info.py new file mode 100644 index 00000000..d60e9b4a --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_kubernetes_info.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_kubernetes_info +short_description: Returns information about an existing DigitalOcean Kubernetes cluster +description: + - Returns 
information about an existing DigitalOcean Kubernetes cluster. +version_added: 1.3.0 +author: Mark Mercado (@mamercad) +options: + oauth_token: + description: + - DigitalOcean OAuth token; can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + type: str + aliases: ['API_TOKEN'] + required: true + name: + description: + - A human-readable name for a Kubernetes cluster. + type: str + required: true + return_kubeconfig: + description: + - Controls whether or not to return the C(kubeconfig). + type: bool + required: false + default: false +""" + + +EXAMPLES = r""" +- name: Get information about an existing DigitalOcean Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: hacktoberfest + return_kubeconfig: yes + register: my_cluster + +- ansible.builtin.debug: + msg: "Cluster name is {{ my_cluster.data.name }}, ID is {{ my_cluster.data.id }}" + +- ansible.builtin.debug: + msg: "Cluster kubeconfig is {{ my_cluster.data.kubeconfig }}" +""" + + +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_kubernetes_clusters +# The only variance from the documented response is that the kubeconfig is (if return_kubeconfig is True) merged in at data['kubeconfig'] +RETURN = r""" +data: + description: A DigitalOcean Kubernetes cluster (and optional C(kubeconfig)) + returned: changed + type: dict + sample: + auto_upgrade: false + cluster_subnet: 10.244.0.0/16 + created_at: '2020-09-26T21:36:18Z' + endpoint: https://REDACTED.k8s.ondigitalocean.com + id: REDACTED + ipv4: REDACTED + kubeconfig: |- + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: REDACTED + server: https://REDACTED.k8s.ondigitalocean.com + name: do-nyc1-hacktoberfest + contexts: + - context: + cluster: do-nyc1-hacktoberfest + user: do-nyc1-hacktoberfest-admin + name: do-nyc1-hacktoberfest + current-context: do-nyc1-hacktoberfest + kind: Config + preferences: {} + users: + - name: do-nyc1-hacktoberfest-admin + user: + token: REDACTED + maintenance_policy: + day: any + duration: 4h0m0s + start_time: '13:00' + name: hacktoberfest + node_pools: + - auto_scale: false + count: 1 + id: REDACTED + labels: null + max_nodes: 0 + min_nodes: 0 + name: hacktoberfest-workers + nodes: + - created_at: '2020-09-26T21:36:18Z' + droplet_id: 'REDACTED' + id: REDACTED + name: hacktoberfest-workers-3tv46 + status: + state: running + updated_at: '2020-09-26T21:40:28Z' + size: s-1vcpu-2gb + tags: + - k8s + - k8s:REDACTED + - k8s:worker + taints: [] + region: nyc1 + service_subnet: 10.245.0.0/16 + status: + state: running + surge_upgrade: false + tags: + - k8s + - k8s:REDACTED + updated_at: '2020-09-26T21:42:29Z' + version: 1.18.8-do.0 + vpc_uuid: REDACTED +""" + + +import traceback +import time +import json +from traceback import format_exc +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOKubernetesInfo(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # Pop these values so we don't include them in the POST data + self.module.params.pop("oauth_token") + self.return_kubeconfig = self.module.params.pop("return_kubeconfig") + self.cluster_id = None + + def get_by_id(self): + """Returns an existing DigitalOcean 
Kubernetes cluster matching on id""" + response = self.rest.get("kubernetes/clusters/{0}".format(self.cluster_id)) + json_data = response.json + if response.status_code == 200: + return json_data + return None + + def get_all_clusters(self): + """Returns all DigitalOcean Kubernetes clusters""" + response = self.rest.get("kubernetes/clusters") + json_data = response.json + if response.status_code == 200: + return json_data + return None + + def get_by_name(self, cluster_name): + """Returns an existing DigitalOcean Kubernetes cluster matching on name""" + if not cluster_name: + return None + clusters = self.get_all_clusters() + for cluster in clusters["kubernetes_clusters"]: + if cluster["name"] == cluster_name: + return cluster + return None + + def get_kubernetes_kubeconfig(self): + """Returns the kubeconfig for an existing DigitalOcean Kubernetes cluster""" + response = self.rest.get( + "kubernetes/clusters/{0}/kubeconfig".format(self.cluster_id) + ) + if response.status_code == 200: + return response.body + else: + self.module.fail_json(msg="Failed to retrieve kubeconfig") + + def get_kubernetes(self): + """Returns an existing DigitalOcean Kubernetes cluster by name""" + json_data = self.get_by_name(self.module.params["name"]) + if json_data: + self.cluster_id = json_data["id"] + return json_data + else: + return None + + def get(self): + """Fetches an existing DigitalOcean Kubernetes cluster + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_kubernetes_clusters + """ + json_data = self.get_kubernetes() + if json_data: + if self.return_kubeconfig: + json_data["kubeconfig"] = self.get_kubernetes_kubeconfig() + self.module.exit_json(changed=False, data=json_data) + self.module.fail_json(changed=False, msg="Kubernetes cluster not found") + + +def run(module): + cluster = DOKubernetesInfo(module) + cluster.get() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + name=dict(type="str", required=True), + return_kubeconfig=dict(type="bool", default=False), + ), + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer.py new file mode 100644 index 00000000..ecc9efa4 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer.py @@ -0,0 +1,881 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Mark Mercado <mamercad@gmail.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_load_balancer +version_added: 1.10.0 +short_description: Manage DigitalOcean Load Balancers +description: + - Manage DigitalOcean Load Balancers +author: "Mark Mercado (@mamercad)" +options: + state: + description: + - The usual, C(present) to create, C(absent) to destroy + type: str + choices: ["present", "absent"] + default: present + name: + description: + - A human-readable name for a load balancer instance. + - Required and must be unique (current API documentation is not up-to-date for this parameter). 
+ type: str + required: true + size: + description: + - The size of the load balancer. + - The available sizes are C(lb-small), C(lb-medium), or C(lb-large). + - You can resize load balancers after creation up to once per hour. + - You cannot resize a load balancer within the first hour of its creation. + - This field has been replaced by the C(size_unit) field for all regions except in C(ams2), C(nyc2), and C(sfo1). + - Each available load balancer size now equates to the load balancer having a set number of nodes. + - The formula is C(lb-small) = 1 node, C(lb-medium) = 3 nodes, C(lb-large) = 6 nodes. + required: false + type: str + choices: ["lb-small", "lb-medium", "lb-large"] + default: lb-small + size_unit: + description: + - How many nodes the load balancer contains. + - Each additional node increases the load balancer's ability to manage more connections. + - Load balancers can be scaled up or down, and you can change the number of nodes after creation up to once per hour. + - This field is currently not available in the C(ams2), C(nyc2), or C(sfo1) regions. + - Use the C(size) field to scale load balancers that reside in these regions. + - The value must be in the range 1-100. + required: false + type: int + default: 1 + droplet_ids: + description: + - An array containing the IDs of the Droplets assigned to the load balancer. + - Required when creating load balancers. + - Mutually exclusive with tag, you can either define tag or droplet_ids but not both. + required: false + type: list + elements: int + tag: + description: + - A tag associated with the droplets that you want to dynamically assign to the load balancer. + - Required when creating load balancers. + - Mutually exclusive with droplet_ids, you can either define tag or droplet_ids but not both. + required: false + type: str + region: + description: + - The slug identifier for the region where the resource will initially be available. + required: false + type: str + aliases: ["region_id"] + forwarding_rules: + description: + - An array of objects specifying the forwarding rules for a load balancer. + - Required when creating load balancers. + required: false + type: list + elements: dict + suboptions: + entry_protocol: + type: str + description: Entry protocol + default: http + entry_port: + type: int + description: Entry port + default: 8080 + target_protocol: + type: str + description: Target protocol + default: http + target_port: + type: int + description: Target port + default: 8080 + certificate_id: + type: str + description: Certificate ID + default: "" + tls_passthrough: + type: bool + description: TLS passthrough + default: false + default: + - entry_protocol: http + entry_port: 8080 + target_protocol: http + target_port: 8080 + certificate_id: "" + tls_passthrough: false + health_check: + description: + - An object specifying health check settings for the load balancer. 
+ required: false + type: dict + suboptions: + protocol: + description: Protocol + type: str + required: false + default: http + port: + description: Port + type: int + required: false + default: 80 + path: + description: Path + type: str + required: false + default: / + check_interval_seconds: + description: Check interval seconds + type: int + required: false + default: 10 + response_timeout_seconds: + description: Response timeout seconds + type: int + required: false + default: 5 + healthy_threshold: + description: Healthy threshold + type: int + required: false + default: 5 + unhealthy_threshold: + description: Unhealthy threshold + type: int + required: false + default: 3 + default: + protocol: http + port: 80 + path: / + check_interval_seconds: 10 + response_timeout_seconds: 5 + healthy_threshold: 5 + unhealthy_threshold: 3 + sticky_sessions: + description: + - An object specifying sticky sessions settings for the load balancer. + required: false + type: dict + suboptions: + type: + description: Type + type: str + required: false + default: none + default: + type: none + redirect_http_to_https: + description: + - A boolean value indicating whether HTTP requests to the load balancer on port 80 will be redirected to HTTPS on port 443. + type: bool + required: false + default: false + enable_proxy_protocol: + description: + - A boolean value indicating whether PROXY Protocol is in use. + type: bool + required: false + default: false + enable_backend_keepalive: + description: + - A boolean value indicating whether HTTP keepalive connections are maintained to target Droplets. + type: bool + required: false + default: false + vpc_uuid: + description: + - A string specifying the UUID of the VPC to which the load balancer is assigned. + - If unspecified, uses the default VPC in the region. + type: str + required: false + wait: + description: + - Wait for the Load Balancer to be running before returning. + type: bool + required: false + default: true + wait_timeout: + description: + - How long before wait gives up, in seconds, when creating a Load Balancer. + type: int + default: 600 + project_name: + aliases: ["project"] + description: + - Project to assign the resource to (project name, not UUID). + - Defaults to the default project of the account (empty string). + - Currently only supported when creating. 
+ type: str + required: false + default: "" +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Create a Load Balancer + community.digitalocean.digital_ocean_load_balancer: + state: present + name: test-loadbalancer-1 + droplet_ids: + - 12345678 + region: nyc1 + forwarding_rules: + - entry_protocol: http + entry_port: 8080 + target_protocol: http + target_port: 8080 + certificate_id: "" + tls_passthrough: false + +- name: Create a Load Balancer (and assign to Project "test") + community.digitalocean.digital_ocean_load_balancer: + state: present + name: test-loadbalancer-1 + droplet_ids: + - 12345678 + region: nyc1 + forwarding_rules: + - entry_protocol: http + entry_port: 8080 + target_protocol: http + target_port: 8080 + certificate_id: "" + tls_passthrough: false + project: test + +- name: Create a Load Balancer and associate it with a tag + community.digitalocean.digital_ocean_load_balancer: + state: present + name: test-loadbalancer-1 + tag: test-tag + region: tor1 +""" + + +RETURN = r""" +data: + description: A DigitalOcean Load Balancer + returned: changed + type: dict + sample: + load_balancer: + algorithm: round_robin + created_at: '2021-08-22T14:23:41Z' + droplet_ids: + - 261172461 + enable_backend_keepalive: false + enable_proxy_protocol: false + forwarding_rules: + - certificate_id: '' + entry_port: 8080 + entry_protocol: http + target_port: 8080 + target_protocol: http + tls_passthrough: false + health_check: + check_interval_seconds: 10 + healthy_threshold: 5 + path: / + port: 80 + protocol: http + response_timeout_seconds: 5 + unhealthy_threshold: 3 + id: b4fdb507-70e8-4325-a89e-d02271b93618 + ip: 159.203.150.113 + name: test-loadbalancer-1 + redirect_http_to_https: false + region: + available: true + features: + - backups + - ipv6 + - metadata + - install_agent + - storage + - image_transfer + name: New York 3 + sizes: + - s-1vcpu-1gb + - s-1vcpu-1gb-amd + - s-1vcpu-1gb-intel + - s-1vcpu-2gb + - s-1vcpu-2gb-amd + - s-1vcpu-2gb-intel + - s-2vcpu-2gb + - s-2vcpu-2gb-amd + - s-2vcpu-2gb-intel + - s-2vcpu-4gb + - s-2vcpu-4gb-amd + - s-2vcpu-4gb-intel + - s-4vcpu-8gb + - c-2 + - c2-2vcpu-4gb + - s-4vcpu-8gb-amd + - s-4vcpu-8gb-intel + - g-2vcpu-8gb + - gd-2vcpu-8gb + - s-8vcpu-16gb + - m-2vcpu-16gb + - c-4 + - c2-4vcpu-8gb + - s-8vcpu-16gb-amd + - s-8vcpu-16gb-intel + - m3-2vcpu-16gb + - g-4vcpu-16gb + - so-2vcpu-16gb + - m6-2vcpu-16gb + - gd-4vcpu-16gb + - so1_5-2vcpu-16gb + - m-4vcpu-32gb + - c-8 + - c2-8vcpu-16gb + - m3-4vcpu-32gb + - g-8vcpu-32gb + - so-4vcpu-32gb + - m6-4vcpu-32gb + - gd-8vcpu-32gb + - so1_5-4vcpu-32gb + - m-8vcpu-64gb + - c-16 + - c2-16vcpu-32gb + - m3-8vcpu-64gb + - g-16vcpu-64gb + - so-8vcpu-64gb + - m6-8vcpu-64gb + - gd-16vcpu-64gb + - so1_5-8vcpu-64gb + - m-16vcpu-128gb + - c-32 + - c2-32vcpu-64gb + - m3-16vcpu-128gb + - m-24vcpu-192gb + - g-32vcpu-128gb + - so-16vcpu-128gb + - m6-16vcpu-128gb + - gd-32vcpu-128gb + - m3-24vcpu-192gb + - g-40vcpu-160gb + - so1_5-16vcpu-128gb + - m-32vcpu-256gb + - gd-40vcpu-160gb + - so-24vcpu-192gb + - m6-24vcpu-192gb + - m3-32vcpu-256gb + - so1_5-24vcpu-192gb + - m6-32vcpu-256gb + slug: nyc3 + size: lb-small + status: active + sticky_sessions: + type: none + tag: '' + vpc_uuid: b8fd9a58-d93d-4329-b54a-78a397d64855 +msg: + description: Informational or error message encountered during execution + returned: changed + type: str + sample: No project named test2 found +assign_status: + description: Assignment status (ok, not_found, assigned, 
already_assigned, service_down) + returned: changed + type: str + sample: assigned +resources: + description: Resource assignment involved in project assignment + returned: changed + type: dict + sample: + assigned_at: '2021-10-25T17:39:38Z' + links: + self: https://api.digitalocean.com/v2/load_balancers/17d171d0-8a8b-4251-9c18-c96cc515d36d + status: assigned + urn: do:loadbalancer:17d171d0-8a8b-4251-9c18-c96cc515d36d +""" + + +import time +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, + DigitalOceanProjects, +) + + +class DOLoadBalancer(object): + + # Regions which use 'size' versus 'size_unit' + size_regions = {"ams2", "nyc2", "sfo1"} + all_sizes = {"lb-small", "lb-medium", "lb-large"} + default_size = "lb-small" + min_size_unit = 1 + max_size_unit = 100 + default_size_unit = 1 + + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + self.id = None + self.name = self.module.params.get("name") + self.region = self.module.params.get("region") + + # Handle size versus size_unit + if self.region in DOLoadBalancer.size_regions: + self.module.params.pop("size_unit") + # Ensure that we have size + size = self.module.params.get("size", None) + if not size: + self.module.fail_json(msg="Missing required 'size' parameter") + elif size not in DOLoadBalancer.all_sizes: + self.module.fail_json( + msg="Invalid 'size' parameter '{0}', must be one of: {1}".format( + size, ", ".join(DOLoadBalancer.all_sizes) + ) + ) + else: + self.module.params.pop("size") + # Ensure that we have size_unit + size_unit = self.module.params.get("size_unit", None) + if not size_unit: + self.module.fail_json(msg="Missing required 'size_unit' parameter") + elif ( + size_unit < DOLoadBalancer.min_size_unit + or size_unit > DOLoadBalancer.max_size_unit + ): + self.module.fail_json( + msg="Invalid 'size_unit' parameter '{0}', must be in range: {1}-{2}".format( + size_unit, + DOLoadBalancer.min_size_unit, + DOLoadBalancer.max_size_unit, + ) + ) + + self.updates = [] + + # Pop these values so we don't include them in the POST data + self.module.params.pop("oauth_token") + self.wait = self.module.params.pop("wait", True) + self.wait_timeout = self.module.params.pop("wait_timeout", 600) + if self.module.params.get("project"): + # only load for non-default project assignments + self.projects = DigitalOceanProjects(module, self.rest) + + def get_by_id(self): + """Fetch an existing DigitalOcean Load Balancer (by id) + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/get_load_balancer + """ + response = self.rest.get("load_balancers/{0}".format(self.id)) + json_data = response.json + if response.status_code == 200: + # Found one with the given id: + lb = json_data.get("load_balancer", None) + if lb is not None: + self.lb = lb + return lb + else: + self.module.fail_json( + msg="Unexpected error; please file a bug: get_by_id" + ) + return None + + def get_by_name(self): + """Fetch all existing DigitalOcean Load Balancers + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_load_balancers + """ + page = 1 + while page is not None: + response = self.rest.get("load_balancers?page={0}".format(page)) + json_data = response.json + if json_data is None: + self.module.fail_json( + msg="Empty response from the DigitalOcean API; please try again or 
open a bug if it never succeeds." + ) + if response.status_code == 200: + lbs = json_data.get("load_balancers", []) + for lb in lbs: + # Found one with the same name: + name = lb.get("name", None) + if name == self.name: + # Make sure the region is the same! + region = lb.get("region", None) + if region is not None: + region_slug = region.get("slug", None) + if region_slug is not None: + if region_slug == self.region: + self.lb = lb + return lb + else: + self.module.fail_json( + msg="Cannot change load balancer region -- delete and re-create" + ) + else: + self.module.fail_json( + msg="Unexpected error; please file a bug: get_by_name" + ) + else: + self.module.fail_json( + msg="Unexpected error; please file a bug: get_by_name" + ) + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + else: + self.module.fail_json( + msg="Unexpected error; please file a bug: get_by_name" + ) + return None + + def ensure_active(self): + """Wait for the existing Load Balancer to be active""" + end_time = time.monotonic() + self.wait_timeout + while time.monotonic() < end_time: + if self.get_by_id(): + status = self.lb.get("status", None) + if status is not None: + if status == "active": + return True + else: + self.module.fail_json( + msg="Unexpected error; please file a bug: ensure_active" + ) + else: + self.module.fail_json( + msg="Load Balancer {0} in {1} not found".format( + self.id, self.region + ) + ) + time.sleep(10) + self.module.fail_json( + msg="Timed out waiting for Load Balancer {0} in {1} to be active".format( + self.id, self.region + ) + ) + + def is_same(self, found_lb): + """Checks if exising Load Balancer is the same as requested""" + + check_attributes = [ + "droplet_ids", + "size", + "size_unit", + "forwarding_rules", + "health_check", + "sticky_sessions", + "redirect_http_to_https", + "enable_proxy_protocol", + "enable_backend_keepalive", + ] + + lb_region = found_lb.get("region", None) + if not lb_region: + self.module.fail_json( + msg="Unexpected error; please file a bug should this persist: empty load balancer region" + ) + + lb_region_slug = lb_region.get("slug", None) + if not lb_region_slug: + self.module.fail_json( + msg="Unexpected error; please file a bug should this persist: empty load balancer region slug" + ) + + for attribute in check_attributes: + if ( + attribute == "size" + and lb_region_slug not in DOLoadBalancer.size_regions + ): + continue + if ( + attribute == "size_unit" + and lb_region_slug in DOLoadBalancer.size_regions + ): + continue + if self.module.params.get(attribute, None) != found_lb.get(attribute, None): + self.updates.append(attribute) + + # Check if the VPC needs changing. 
+ vpc_uuid = self.lb.get("vpc_uuid", None) + if vpc_uuid is not None: + if vpc_uuid != found_lb.get("vpc_uuid", None): + self.updates.append("vpc_uuid") + + if len(self.updates): + return False + else: + return True + + def update(self): + """Updates a DigitalOcean Load Balancer + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/update_load_balancer + """ + request_params = dict(self.module.params) + self.id = self.lb.get("id", None) + self.name = self.lb.get("name", None) + self.vpc_uuid = self.lb.get("vpc_uuid", None) + if self.id is not None and self.name is not None and self.vpc_uuid is not None: + request_params["vpc_uuid"] = self.vpc_uuid + response = self.rest.put( + "load_balancers/{0}".format(self.id), data=request_params + ) + json_data = response.json + if response.status_code == 200: + self.module.exit_json( + changed=True, + msg="Load Balancer {0} ({1}) in {2} updated: {3}".format( + self.name, self.id, self.region, ", ".join(self.updates) + ), + ) + else: + self.module.fail_json( + changed=False, + msg="Error updating Load Balancer {0} ({1}) in {2}: {3}".format( + self.name, self.id, self.region, json_data["message"] + ), + ) + else: + self.module.fail_json(msg="Unexpected error; please file a bug: update") + + def create(self): + """Creates a DigitalOcean Load Balancer + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/create_load_balancer + """ + + # Check if it exists already (the API docs aren't up-to-date right now, + # "name" is required and must be unique across the account. + found_lb = self.get_by_name() + if found_lb is not None: + # Do we need to update it? + if not self.is_same(found_lb): + if self.module.check_mode: + self.module.exit_json( + changed=False, + msg="Load Balancer {0} already exists in {1} (and needs changes)".format( + self.name, self.region + ), + data={"load_balancer": found_lb}, + ) + else: + self.update() + else: + self.module.exit_json( + changed=False, + msg="Load Balancer {0} already exists in {1} (and needs no changes)".format( + self.name, self.region + ), + data={"load_balancer": found_lb}, + ) + + # Check mode. + if self.module.check_mode: + self.module.exit_json( + changed=False, + msg="Would create Load Balancer {0} in {1}".format( + self.name, self.region + ), + ) + + # Create it. + request_params = dict(self.module.params) + response = self.rest.post("load_balancers", data=request_params) + json_data = response.json + if response.status_code != 202: + self.module.fail_json( + msg="Failed creating Load Balancer {0} in {1}: {2}".format( + self.name, self.region, json_data["message"] + ) + ) + + # Store it. 
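+        # The create response nests the new resource under the "load_balancer"
+        # key; its id is needed below for the optional active-state wait and
+        # for building the project-assignment URN.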
+ lb = json_data.get("load_balancer", None) + if lb is None: + self.module.fail_json( + msg="Unexpected error; please file a bug: create empty lb" + ) + + self.id = lb.get("id", None) + if self.id is None: + self.module.fail_json( + msg="Unexpected error; please file a bug: create missing id" + ) + + if self.wait: + self.ensure_active() + + project_name = self.module.params.get("project") + if project_name: # empty string is the default project, skip project assignment + urn = "do:loadbalancer:{0}".format(self.id) + ( + assign_status, + error_message, + resources, + ) = self.projects.assign_to_project(project_name, urn) + self.module.exit_json( + changed=True, + data=json_data, + msg=error_message, + assign_status=assign_status, + resources=resources, + ) + else: + self.module.exit_json(changed=True, data=json_data) + + def delete(self): + """Deletes a DigitalOcean Load Balancer + API reference: https://docs.digitalocean.com/reference/api/api-reference/#operation/delete_load_balancer + """ + + lb = self.get_by_name() + if lb is not None: + id = lb.get("id", None) + name = lb.get("name", None) + lb_region = lb.get("region", None) + if not lb_region: + self.module.fail_json( + msg="Unexpected error; please file a bug: delete missing region" + ) + lb_region_slug = lb_region.get("slug", None) + if id is None or name is None or lb_region_slug is None: + self.module.fail_json( + msg="Unexpected error; please file a bug: delete missing id, name, or region slug" + ) + else: + response = self.rest.delete("load_balancers/{0}".format(id)) + json_data = response.json + if response.status_code == 204: + # Response body should be empty + self.module.exit_json( + changed=True, + msg="Load Balancer {0} ({1}) in {2} deleted".format( + name, id, lb_region_slug + ), + ) + else: + message = json_data.get( + "message", "Empty failure message from the DigitalOcean API!" 
+ ) + self.module.fail_json( + changed=False, + msg="Failed to delete Load Balancer {0} ({1}) in {2}: {3}".format( + name, id, lb_region_slug, message + ), + ) + else: + self.module.fail_json( + changed=False, + msg="Load Balancer {0} not found in {1}".format(self.name, self.region), + ) + + +def run(module): + state = module.params.pop("state") + lb = DOLoadBalancer(module) + if state == "present": + lb.create() + elif state == "absent": + lb.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + name=dict(type="str", required=True), + size=dict( + type="str", + choices=list(DOLoadBalancer.all_sizes), + required=False, + default=DOLoadBalancer.default_size, + ), + size_unit=dict( + type="int", + required=False, + default=DOLoadBalancer.default_size_unit, + ), + droplet_ids=dict(type="list", elements="int", required=False), + tag=dict(type="str", required=False), + region=dict( + aliases=["region_id"], + required=False, + ), + forwarding_rules=dict( + type="list", + elements="dict", + required=False, + default=[ + { + "entry_protocol": "http", + "entry_port": 8080, + "target_protocol": "http", + "target_port": 8080, + "certificate_id": "", + "tls_passthrough": False, + } + ], + ), + health_check=dict( + type="dict", + required=False, + default=dict( + { + "protocol": "http", + "port": 80, + "path": "/", + "check_interval_seconds": 10, + "response_timeout_seconds": 5, + "healthy_threshold": 5, + "unhealthy_threshold": 3, + } + ), + ), + sticky_sessions=dict( + type="dict", required=False, default=dict({"type": "none"}) + ), + redirect_http_to_https=dict(type="bool", required=False, default=False), + enable_proxy_protocol=dict(type="bool", required=False, default=False), + enable_backend_keepalive=dict(type="bool", required=False, default=False), + vpc_uuid=dict(type="str", required=False), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + project_name=dict(type="str", aliases=["project"], required=False, default=""), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + [ + ("state", "present", ["forwarding_rules"]), + ("state", "present", ["tag", "droplet_ids"], True), + ] + ), + # Droplet ID and tag are mutually exclusive, check that both have not been defined + mutually_exclusive=( + [ + ("tag", "droplet_ids"), + ("size", "size_unit"), + ] + ), + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_facts.py new file mode 100644 index 00000000..bc1f9250 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_facts.py @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_load_balancer_info +short_description: Gather information about DigitalOcean load balancers +description: + - This module can be used to gather information about DigitalOcean provided load balancers. 
+ - This module was called C(digital_ocean_load_balancer_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + load_balancer_id: + description: + - Load balancer ID that can be used to identify and reference a load_balancer. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all load balancers + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about load balancer with given id + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ oauth_token }}" + load_balancer_id: "4de7ac8b-495b-4884-9a69-1050c6793cd6" + +- name: Get name from load balancer id + community.digitalocean.digital_ocean_load_balancer_info: + register: resp_out +- set_fact: + load_balancer_name: "{{ item.name }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?id=='4de7ac8b-495b-4884-9a69-1050c6793cd6']" +- debug: + var: load_balancer_name +""" + + +RETURN = r""" +data: + description: DigitalOcean Load balancer information + returned: success + type: list + elements: dict + sample: [ + { + "id": "4de7ac8b-495b-4884-9a69-1050c6793cd6", + "name": "example-lb-01", + "ip": "104.131.186.241", + "algorithm": "round_robin", + "status": "new", + "created_at": "2017-02-01T22:22:58Z", + ... + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + load_balancer_id = module.params.get("load_balancer_id", None) + rest = DigitalOceanHelper(module) + + base_url = "load_balancers" + if load_balancer_id is not None: + response = rest.get("%s/%s" % (base_url, load_balancer_id)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve load balancers for DigitalOcean") + + load_balancer = [response.json["load_balancer"]] + else: + load_balancer = rest.get_paginated_data( + base_url=base_url + "?", data_key_name="load_balancers" + ) + + module.exit_json(changed=False, data=load_balancer) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + load_balancer_id=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_load_balancer_facts", + "community.digitalocean.digital_ocean_load_balancer_facts", + ): + module.deprecate( + "The 'digital_ocean_load_balancer_facts' module has been renamed to 'digital_ocean_load_balancer_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_info.py new file mode 100644 index 00000000..bc1f9250 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_load_balancer_info.py @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_load_balancer_info +short_description: Gather information about DigitalOcean load balancers +description: + - This module can be used to gather information about DigitalOcean provided load balancers. + - This module was called C(digital_ocean_load_balancer_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + load_balancer_id: + description: + - Load balancer ID that can be used to identify and reference a load_balancer. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all load balancers + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about load balancer with given id + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ oauth_token }}" + load_balancer_id: "4de7ac8b-495b-4884-9a69-1050c6793cd6" + +- name: Get name from load balancer id + community.digitalocean.digital_ocean_load_balancer_info: + register: resp_out +- set_fact: + load_balancer_name: "{{ item.name }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?id=='4de7ac8b-495b-4884-9a69-1050c6793cd6']" +- debug: + var: load_balancer_name +""" + + +RETURN = r""" +data: + description: DigitalOcean Load balancer information + returned: success + type: list + elements: dict + sample: [ + { + "id": "4de7ac8b-495b-4884-9a69-1050c6793cd6", + "name": "example-lb-01", + "ip": "104.131.186.241", + "algorithm": "round_robin", + "status": "new", + "created_at": "2017-02-01T22:22:58Z", + ... 
+ }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + load_balancer_id = module.params.get("load_balancer_id", None) + rest = DigitalOceanHelper(module) + + base_url = "load_balancers" + if load_balancer_id is not None: + response = rest.get("%s/%s" % (base_url, load_balancer_id)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve load balancers for DigitalOcean") + + load_balancer = [response.json["load_balancer"]] + else: + load_balancer = rest.get_paginated_data( + base_url=base_url + "?", data_key_name="load_balancers" + ) + + module.exit_json(changed=False, data=load_balancer) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + load_balancer_id=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_load_balancer_facts", + "community.digitalocean.digital_ocean_load_balancer_facts", + ): + module.deprecate( + "The 'digital_ocean_load_balancer_facts' module has been renamed to 'digital_ocean_load_balancer_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts.py new file mode 100644 index 00000000..67825ccc --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Mark Mercado <mamercad@gmail.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_monitoring_alerts +version_added: 1.10.0 +short_description: Programmatically retrieve metrics as well as configure alert policies based on these metrics +description: + - The DigitalOcean Monitoring API makes it possible to programmatically retrieve metrics as well as configure alert policies based on these metrics. + - The Monitoring API can help you gain insight into how your apps are performing and consuming resources. 
+author: "Mark Mercado (@mamercad)" +options: + oauth_token: + description: + - DigitalOcean OAuth token; can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + type: str + aliases: ["API_TOKEN"] + required: true + state: + description: + - The usual, C(present) to create, C(absent) to destroy + type: str + choices: ["present", "absent"] + default: present + alerts: + description: + - Alert object, required for C(state=present) + - Supports C(email["email1", "email2", ...]) and C(slack[{"channel1", "url1"}, {"channel2", "url2"}, ...]) + type: dict + required: false + compare: + description: Alert comparison, required for C(state=present) + type: str + required: false + choices: ["GreaterThan", "LessThan"] + description: + description: Alert description, required for C(state=present) + type: str + required: false + enabled: + description: Enabled or not, required for C(state=present) + type: bool + required: false + entities: + description: Alert entities, required for C(state=present) + type: list + elements: str + required: false + tags: + description: Alert tags, required for C(state=present) + type: list + elements: str + required: false + type: + description: + - Alert type, required for C(state=present) + - See U(https://docs.digitalocean.com/reference/api/api-reference/#operation/create_alert_policy) for valid types + type: str + required: false + choices: + - v1/insights/droplet/load_1 + - v1/insights/droplet/load_5 + - v1/insights/droplet/load_15 + - v1/insights/droplet/memory_utilization_percent + - v1/insights/droplet/disk_utilization_percent + - v1/insights/droplet/cpu + - v1/insights/droplet/disk_read + - v1/insights/droplet/disk_write + - v1/insights/droplet/public_outbound_bandwidth + - v1/insights/droplet/public_inbound_bandwidth + - v1/insights/droplet/private_outbound_bandwidth + - v1/insights/droplet/private_inbound_bandwidth + value: + description: Alert threshold, required for C(state=present) + type: float + required: false + window: + description: Alert window, required for C(state=present) + type: str + choices: ["5m", "10m", "30m", "1h"] + required: false + uuid: + description: Alert uuid, required for C(state=absent) + type: str + required: false +""" + + +EXAMPLES = r""" +- name: Create Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts: + state: present + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + alerts: + email: ["alerts@example.com"] + slack: [] + compare: GreaterThan + description: Droplet load1 alert + enabled: true + entities: ["{{ droplet_id }}"] + tags: ["my_alert_tag"] + type: v1/insights/droplet/load_1 + value: 3.14159 + window: 5m + register: monitoring_alert_policy + +- name: Delete Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts: + state: absent + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + uuid: "{{ monitoring_alert_policy.data.uuid }}" +""" + + +RETURN = r""" +data: + description: A DigitalOcean Monitoring alerts policy + returned: changed + type: dict + sample: + alerts: + email: + - mamercad@gmail.com + slack: [] + compare: GreaterThan + description: Droplet load1 alert + enabled: true + entities: + - '262383737' + tags: + - my_alert_tag + type: v1/insights/droplet/load_1 + uuid: 9f988f00-4690-443d-b638-ed5a99bbad3b + value: 3.14159 + window: 5m +""" + + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, env_fallback +from 
ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +alert_types = [ + "v1/insights/droplet/load_1", + "v1/insights/droplet/load_5", + "v1/insights/droplet/load_15", + "v1/insights/droplet/memory_utilization_percent", + "v1/insights/droplet/disk_utilization_percent", + "v1/insights/droplet/cpu", + "v1/insights/droplet/disk_read", + "v1/insights/droplet/disk_write", + "v1/insights/droplet/public_outbound_bandwidth", + "v1/insights/droplet/public_inbound_bandwidth", + "v1/insights/droplet/private_outbound_bandwidth", + "v1/insights/droplet/private_inbound_bandwidth", +] + +alert_keys = [ + "alerts", + "compare", + "description", + "enabled", + "entities", + "tags", + "type", + "value", + "window", +] + +alert_windows = ["5m", "10m", "30m", "1h"] + + +class DOMonitoringAlerts(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # Pop these values so we don't include them in the POST data + self.module.params.pop("oauth_token") + + def get_alerts(self): + alerts = self.rest.get_paginated_data( + base_url="monitoring/alerts?", data_key_name="policies" + ) + return alerts + + def get_alert(self): + alerts = self.rest.get_paginated_data( + base_url="monitoring/alerts?", data_key_name="policies" + ) + for alert in alerts: + for alert_key in alert_keys: + if alert.get(alert_key, None) != self.module.params.get( + alert_key, None + ): + break # This key doesn't match, try the next alert. + else: + return alert # Didn't hit break, this alert matches. + return None + + def create(self): + # Check for an existing (same) one. + alert = self.get_alert() + if alert is not None: + self.module.exit_json( + changed=False, + data=alert, + ) + + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Create it. 
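+        # POST the remaining module parameters (state and oauth_token were popped
+        # earlier) as the alert policy body, then re-fetch the matching alert so
+        # the returned data reflects what the API actually stored.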
+ request_params = dict(self.module.params) + response = self.rest.post("monitoring/alerts", data=request_params) + if response.status_code == 200: + alert = self.get_alert() + if alert is not None: + self.module.exit_json( + changed=True, + data=alert, + ) + else: + self.module.fail_json( + changed=False, msg="Unexpected error; please file a bug: create" + ) + else: + self.module.fail_json( + msg="Create Monitoring Alert '{0}' failed [HTTP {1}: {2}]".format( + self.module.params.get("description"), + response.status_code, + response.json.get("message", None), + ) + ) + + def delete(self): + uuid = self.module.params.get("uuid", None) + if uuid is not None: + + # Check mode + if self.module.check_mode: + self.module.exit_json(changed=True) + + # Delete it + response = self.rest.delete("monitoring/alerts/{0}".format(uuid)) + if response.status_code == 204: + self.module.exit_json( + changed=True, + msg="Deleted Monitoring Alert {0}".format(uuid), + ) + else: + self.module.fail_json( + msg="Delete Monitoring Alert {0} failed [HTTP {1}: {2}]".format( + uuid, + response.status_code, + response.json.get("message", None), + ) + ) + else: + self.module.fail_json( + changed=False, msg="Unexpected error; please file a bug: delete" + ) + + +def run(module): + state = module.params.pop("state") + alerts = DOMonitoringAlerts(module) + if state == "present": + alerts.create() + else: + alerts.delete() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + state=dict( + choices=["present", "absent"], default="present", required=False + ), + alerts=dict(type="dict", required=False), + compare=dict( + type="str", choices=["GreaterThan", "LessThan"], required=False + ), + description=dict(type="str", required=False), + enabled=dict(type="bool", required=False), + entities=dict(type="list", elements="str", required=False), + tags=dict(type="list", elements="str", required=False), + type=dict(type="str", choices=alert_types, required=False), + value=dict(type="float", required=False), + window=dict(type="str", choices=alert_windows, required=False), + uuid=dict(type="str", required=False), + ), + required_if=( + [ + ("state", "present", alert_keys), + ("state", "absent", ["uuid"]), + ] + ), + supports_check_mode=True, + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts_info.py new file mode 100644 index 00000000..a5d87ad6 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_monitoring_alerts_info.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Mark Mercado <mamercad@gmail.com> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_monitoring_alerts_info +version_added: 1.10.0 +short_description: Programmatically retrieve metrics as well as configure alert policies based on these metrics +description: + - The DigitalOcean Monitoring API makes it possible to programmatically retrieve metrics as well as configure alert policies based on these metrics. 
+ - The Monitoring API can help you gain insight into how your apps are performing and consuming resources. +author: "Mark Mercado (@mamercad)" +options: + state: + description: + - C(present) to return alerts + type: str + choices: ["present"] + default: present + oauth_token: + description: + - DigitalOcean OAuth token; can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + type: str + aliases: ["API_TOKEN"] + required: true + uuid: + description: + - Alert uuid (if specified only returns the specific alert policy) + type: str + required: false +""" + + +EXAMPLES = r""" +- name: Get Droplet Monitoring alerts polices + community.digitalocean.digital_ocean_monitoring_alerts_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + register: monitoring_alerts + +- name: Get specific Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + uuid: ec48b0e7-23bb-4a7f-95f2-d83da62fcd60 + register: monitoring_alert +""" + + +RETURN = r""" +data: + description: DigitalOcean Monitoring alerts policies + returned: changed + type: dict + sample: + data: + - alerts: + email: + - mamercad@gmail.com + slack: [] + compare: GreaterThan + description: Droplet load1 alert + enabled: true + entities: + - '262383737' + tags: + - my_alert_tag + type: v1/insights/droplet/load_1 + uuid: ec48b0e7-23bb-4a7f-95f2-d83da62fcd60 + value: 3.14159 + window: 5m +""" + + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOMonitoringAlertsInfo(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # Pop these values so we don't include them in the POST data + self.module.params.pop("oauth_token") + + def get_alerts(self): + alerts = self.rest.get_paginated_data( + base_url="monitoring/alerts?", data_key_name="policies" + ) + self.module.exit_json( + changed=False, + data=alerts, + ) + + def get_alert(self, uuid): + alerts = self.rest.get_paginated_data( + base_url="monitoring/alerts?", data_key_name="policies" + ) + for alert in alerts: + alert_uuid = alert.get("uuid", None) + if alert_uuid is not None: + if alert_uuid == uuid: + self.module.exit_json( + changed=False, + data=alert, + ) + else: + self.module.fail_json( + changed=False, msg="Unexpected error; please file a bug: get_alert" + ) + self.module.exit_json( + changed=False, + data=[], + ) + + +def run(module): + alerts = DOMonitoringAlertsInfo(module) + uuid = module.params.get("uuid", None) + if uuid is None: + alerts.get_alerts() + else: + alerts.get_alert(uuid) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=["present"], default="present"), + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + uuid=dict(type="str", required=False), + ), + supports_check_mode=True, + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project.py new file mode 100644 index 00000000..e0333883 --- /dev/null +++ 
b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: digital_ocean_project +short_description: Manage a DigitalOcean project +description: + - Manage a project in DigitalOcean. +author: "Tyler Auerbeck (@tylerauerbeck)" +version_added: 1.6.0 + +options: + state: + description: + - Indicate desired state of the target. + - C(present) will create the project. + - C(absent) will delete the project, if it exists. + default: present + choices: ['present', 'absent'] + type: str + oauth_token: + description: + - DigitalOcean OAuth token. Can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables. + aliases: ['API_TOKEN'] + type: str + required: true + environment: + description: + - The environment of the project's resources. + choices: ['Development', 'Staging', 'Production'] + type: str + is_default: + description: + - If true, all resources will be added to this project if no project is specified. + default: False + type: bool + name: + description: + - The human-readable name for the project. The maximum length is 175 characters and the name must be unique. + type: str + id: + description: + - UUID of the project. + type: str + purpose: + description: + - The purpose of the project. The maximum length is 255 characters. + - Required if C(state) is C(present). + - If not one of the DigitalOcean-provided purposes, it will be prefixed with C(Other). + - The DigitalOcean-provided purposes are listed below. + - C(Just trying out DigitalOcean) + - C(Class project/Educational Purposes) + - C(Website or blog) + - C(Web Application) + - C(Service or API) + - C(Mobile Application) + - C(Machine Learning/AI/Data Processing) + - C(IoT) + - C(Operational/Developer tooling) + type: str + description: + description: + - The description of the project. The maximum length is 255 characters.
+ type: str +""" + + +EXAMPLES = r""" +# Creates a new project +- community.digitalocean.digital_ocean_project: + name: "TestProj" + state: "present" + description: "This is a test project" + purpose: "IoT" + environment: "Development" + +# Updates the existing project with the new environment +- community.digitalocean.digital_ocean_project: + name: "TestProj" + state: "present" + description: "This is a test project" + purpose: "IoT" + environment: "Production" + +# This renames an existing project by utilizing the id of the project +- community.digitalocean.digital_ocean_project: + name: "TestProj2" + id: "12312312-abcd-efgh-ijkl-123123123123" + state: "present" + description: "This is a test project" + purpose: "IoT" + environment: "Development" + +# This creates a project that results with a purpose of "Other: My Prod App" +- community.digitalocean.digital_ocean_project: + name: "ProdProj" + state: "present" + description: "This is a prod app" + purpose: "My Prod App" + environment: "Production" + +# This removes a project +- community.digitalocean.digital_ocean_project: + name: "ProdProj" + state: "absent" +""" + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects +data: + description: a DigitalOcean Project + returned: changed + type: dict + sample: { + "project": { + "created_at": "2021-05-28T00:00:00Z", + "description": "This is a test description", + "environment": "Development", + "id": "12312312-abcd-efgh-1234-abcdefgh123", + "is_default": false, + "name": "Test123", + "owner_id": 1234567, + "owner_uuid": "12312312-1234-5678-abcdefghijklm", + "purpose": "IoT", + "updated_at": "2021-05-29T00:00:00Z", + } + } +""" + +import time +import json +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOProject(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.id = None + self.name = None + self.purpose = None + self.description = None + self.environment = None + self.is_default = None + + def get_by_id(self, project_id): + if not project_id: + return None + response = self.rest.get("projects/{0}".format(project_id)) + json_data = response.json + if response.status_code == 200: + project = json_data.get("project", None) + if project is not None: + self.id = project.get("id", None) + self.name = project.get("name", None) + self.purpose = project.get("purpose", None) + self.description = project.get("description", None) + self.environment = project.get("environment", None) + self.is_default = project.get("is_default", None) + return json_data + return None + + def get_by_name(self, project_name): + if not project_name: + return None + page = 1 + while page is not None: + response = self.rest.get("projects?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + for project in json_data["projects"]: + if project.get("name", None) == project_name: + self.id = project.get("id", None) + self.name = project.get("name", None) + self.description = project.get("description", None) + self.purpose = project.get("purpose", None) + self.environment = project.get("environment", None) + self.is_default = project.get("is_default", None) + return {"project": project} + if ( + "links" in json_data + and "pages" in 
json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def get_project(self): + json_data = self.get_by_id(self.module.params["id"]) + if not json_data: + json_data = self.get_by_name(self.module.params["name"]) + return json_data + + def create(self, state): + json_data = self.get_project() + request_params = dict(self.module.params) + + if json_data is not None: + changed = False + valid_purpose = [ + "Just trying out DigitalOcean", + "Class project/Educational Purposes", + "Website or blog", + "Web Application", + "Service or API", + "Mobile Application", + "Machine Learning/AI/Data Processing", + "IoT", + "Operational/Developer tooling", + ] + for key in request_params.keys(): + if ( + key == "purpose" + and request_params[key] is not None + and request_params[key] not in valid_purpose + ): + param = "Other: " + request_params[key] + else: + param = request_params[key] + + if json_data["project"][key] != param and param is not None: + changed = True + + if changed: + response = self.rest.put( + "projects/{0}".format(json_data["project"]["id"]), + data=request_params, + ) + if response.status_code != 200: + self.module.fail_json(changed=False, msg="Unable to update project") + self.module.exit_json(changed=True, data=response.json) + else: + self.module.exit_json(changed=False, data=json_data) + else: + response = self.rest.post("projects", data=request_params) + + if response.status_code != 201: + self.module.fail_json(changed=False, msg="Unable to create project") + self.module.exit_json(changed=True, data=response.json) + + def delete(self): + json_data = self.get_project() + if json_data: + if self.module.check_mode: + self.module.exit_json(changed=True) + response = self.rest.delete( + "projects/{0}".format(json_data["project"]["id"]) + ) + json_data = response.json + if response.status_code == 204: + self.module.exit_json(changed=True, msg="Project deleted") + self.module.fail_json(changed=False, msg="Failed to delete project") + else: + self.module.exit_json(changed=False, msg="Project not found") + + +def core(module): + state = module.params.pop("state") + project = DOProject(module) + if state == "present": + project.create(state) + elif state == "absent": + project.delete() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=["present", "absent"], default="present", type="str"), + oauth_token=dict( + aliases=["API_TOKEN"], + no_log=True, + fallback=( + env_fallback, + ["DO_API_TOKEN", "DO_API_KEY", "DO_OAUTH_TOKEN"], + ), + required=True, + ), + name=dict(type="str"), + id=dict(type="str"), + description=dict(type="str"), + purpose=dict(type="str"), + is_default=dict(type="bool", default=False), + environment=dict( + choices=["Development", "Staging", "Production"], type="str" + ), + ), + required_one_of=(["id", "name"],), + required_if=( + [ + ("state", "present", ["purpose"]), + ] + ), + ) + + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project_info.py new file mode 100644 index 00000000..0c6ac670 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_project_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2020, Tyler Auerbeck <tauerbec@redhat.com> +# GNU General Public License 
v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_project_info +short_description: Gather information about DigitalOcean Projects +description: + - This module can be used to gather information about Projects. +author: "Tyler Auerbeck (@tylerauerbeck)" +version_added: 1.6.0 + +options: + id: + description: + - Project ID that can be used to identify and reference a project. + type: str + name: + description: + - Project name that can be used to identify and reference a project. + type: str + +extends_documentation_fragment: +- community.digitalocean.digital_ocean +""" + + +EXAMPLES = r""" +# Get specific project by id +- community.digitalocean.digital_ocean_project_info: + id: cb1ef55e-3cd8-4c7c-aa5d-07c32bf41627 + +# Get specific project by name +- community.digitalocean.digital_ocean_project_info: + name: my-project-name + +# Get all projects +- community.digitalocean.digital_ocean_project_info: + register: projects +""" + +RETURN = r""" +data: + description: "DigitalOcean project information" + elements: dict + returned: success + type: list + sample: + - created_at: "2021-03-11T00:00:00Z" + description: "My project description" + environment: "Development" + id: "12345678-abcd-efgh-5678-10111213" + is_default: false + name: "my-test-project" + owner_id: 12345678 + owner_uuid: "12345678-1234-4321-abcd-20212223" + purpose: "" + updated_at: "2021-03-11T00:00:00Z" +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +def run(module): + rest = DigitalOceanHelper(module) + + if module.params["id"]: + response = rest.get("projects/{0}".format(module.params["id"])) + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'projects' information due to error: %s" + % response.json["message"] + ) + else: + response = rest.get_paginated_data( + base_url="projects?", data_key_name="projects" + ) + + if module.params["id"]: + data = [response.json["project"]] + elif module.params["name"]: + data = [d for d in response if d["name"] == module.params["name"]] + if not data: + module.fail_json( + msg="Failed to fetch 'projects' information due to error: Unable to find project with name %s" + % module.params["name"] + ) + else: + data = response + + module.exit_json(changed=False, data=data) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=False, default=None), + id=dict(type="str", required=False, default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[("id", "name")], + ) + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_facts.py new file mode 100644 index 00000000..9ffb8ff3 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_facts.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_region_info +short_description: Gather information about DigitalOcean regions +description: + - This module can be used to gather information about regions. + - This module was called C(digital_ocean_region_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Gather information about all regions + community.digitalocean.digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + +- name: Get Name of region where slug is known + community.digitalocean.digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + region_slug: "{{ item.name }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?slug==`nyc1`]" +- debug: + var: region_slug +""" + + +RETURN = r""" +data: + description: DigitalOcean regions information + returned: success + type: list + sample: [ + { + "available": true, + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata", + "install_agent", + "storage" + ], + "name": "New York 1", + "sizes": [ + "512mb", + "s-1vcpu-1gb", + "1gb", + "s-3vcpu-1gb", + "s-1vcpu-2gb", + "s-2vcpu-2gb", + "2gb", + "s-1vcpu-3gb", + "s-2vcpu-4gb", + "4gb", + "c-2", + "m-1vcpu-8gb", + "8gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "16gb" + ], + "slug": "nyc1" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + base_url = "regions?" 
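+    # The trailing "?" leaves room for the paging query parameters that
+    # get_paginated_data() appends while it walks every page of the "regions" key.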
+ regions = rest.get_paginated_data(base_url=base_url, data_key_name="regions") + + module.exit_json(changed=False, data=regions) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_region_facts", + "community.digitalocean.digital_ocean_region_facts", + ): + module.deprecate( + "The 'digital_ocean_region_facts' module has been renamed to 'digital_ocean_region_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_info.py new file mode 100644 index 00000000..9ffb8ff3 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_region_info.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_region_info +short_description: Gather information about DigitalOcean regions +description: + - This module can be used to gather information about regions. + - This module was called C(digital_ocean_region_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Gather information about all regions + community.digitalocean.digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + +- name: Get Name of region where slug is known + community.digitalocean.digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + region_slug: "{{ item.name }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?slug==`nyc1`]" +- debug: + var: region_slug +""" + + +RETURN = r""" +data: + description: DigitalOcean regions information + returned: success + type: list + sample: [ + { + "available": true, + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata", + "install_agent", + "storage" + ], + "name": "New York 1", + "sizes": [ + "512mb", + "s-1vcpu-1gb", + "1gb", + "s-3vcpu-1gb", + "s-1vcpu-2gb", + "s-2vcpu-2gb", + "2gb", + "s-1vcpu-3gb", + "s-2vcpu-4gb", + "4gb", + "c-2", + "m-1vcpu-8gb", + "8gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "16gb" + ], + "slug": "nyc1" + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + base_url = "regions?" 
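+    # Read-only lookup: supports_check_mode=True is safe here because core()
+    # only issues GET requests and never modifies any DigitalOcean resource.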
+ regions = rest.get_paginated_data(base_url=base_url, data_key_name="regions") + + module.exit_json(changed=False, data=regions) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_region_facts", + "community.digitalocean.digital_ocean_region_facts", + ): + module.deprecate( + "The 'digital_ocean_region_facts' module has been renamed to 'digital_ocean_region_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_facts.py new file mode 100644 index 00000000..19aba33e --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_facts.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_size_info +short_description: Gather information about DigitalOcean Droplet sizes +description: + - This module can be used to gather information about droplet sizes. + - This module was called C(digital_ocean_size_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all droplet sizes + community.digitalocean.digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + +- name: Get droplet Size Slug where vcpus is 1 + community.digitalocean.digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + size_slug: "{{ item.slug }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?vcpus==`1`]" +- debug: + var: size_slug + + +""" + + +RETURN = r""" +data: + description: DigitalOcean droplet size information + returned: success + type: list + sample: [ + { + "available": true, + "disk": 20, + "memory": 512, + "price_hourly": 0.00744, + "price_monthly": 5.0, + "regions": [ + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sgp1", + "tor1" + ], + "slug": "512mb", + "transfer": 1.0, + "vcpus": 1 + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("sizes") + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'sizes' information due to error : %s" + % response.json["message"] + ) + + module.exit_json(changed=False, data=response.json["sizes"]) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = 
AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_size_facts", + "community.digitalocean.digital_ocean_size_facts", + ): + module.deprecate( + "The 'digital_ocean_size_facts' module has been renamed to 'digital_ocean_size_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_info.py new file mode 100644 index 00000000..19aba33e --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_size_info.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_size_info +short_description: Gather information about DigitalOcean Droplet sizes +description: + - This module can be used to gather information about droplet sizes. + - This module was called C(digital_ocean_size_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all droplet sizes + community.digitalocean.digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + +- name: Get droplet Size Slug where vcpus is 1 + community.digitalocean.digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + size_slug: "{{ item.slug }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?vcpus==`1`]" +- debug: + var: size_slug + + +""" + + +RETURN = r""" +data: + description: DigitalOcean droplet size information + returned: success + type: list + sample: [ + { + "available": true, + "disk": 20, + "memory": 512, + "price_hourly": 0.00744, + "price_monthly": 5.0, + "regions": [ + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sgp1", + "tor1" + ], + "slug": "512mb", + "transfer": 1.0, + "vcpus": 1 + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("sizes") + if response.status_code != 200: + module.fail_json( + msg="Failed to fetch 'sizes' information due to error : %s" + % response.json["message"] + ) + + module.exit_json(changed=False, data=response.json["sizes"]) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_size_facts", + "community.digitalocean.digital_ocean_size_facts", + ): + module.deprecate( + "The 'digital_ocean_size_facts' 
module has been renamed to 'digital_ocean_size_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot.py new file mode 100644 index 00000000..67dc0100 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_snapshot +short_description: Create and delete DigitalOcean snapshots +version_added: 1.7.0 +description: + - This module can be used to create and delete DigitalOcean Droplet and volume snapshots. +author: "Mark Mercado (@mamercad)" +options: + state: + description: + - Whether the snapshot should be present (created) or absent (deleted). + default: present + choices: + - present + - absent + type: str + snapshot_type: + description: + - Specifies the type of snapshot to create or delete. + - If set to C(droplet), then a Droplet snapshot is created. + - If set to C(volume), then a volume snapshot is created. + choices: + - droplet + - volume + default: droplet + type: str + snapshot_name: + description: + - Name of the snapshot to create. + type: str + snapshot_tags: + description: + - List of tags to apply to the volume snapshot. + - Only applies to volume snapshots (not Droplets). + type: list + elements: str + droplet_id: + description: + - Droplet ID to snapshot. + type: str + volume_id: + description: + - Volume ID to snapshot. + type: str + snapshot_id: + description: + - Snapshot ID to delete. + type: str + wait: + description: + - Wait for the snapshot to be created before returning. + default: True + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds, when creating a snapshot. + default: 120 + type: int +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Snapshot a Droplet + community.digitalocean.digital_ocean_snapshot: + state: present + snapshot_type: droplet + droplet_id: 250329179 + register: result + +- name: Delete a Droplet snapshot + community.digitalocean.digital_ocean_snapshot: + state: absent + snapshot_type: droplet + snapshot_id: 85905825 + register: result + +- name: Snapshot a Volume + community.digitalocean.digital_ocean_snapshot: + state: present + snapshot_type: volume + snapshot_name: mysnapshot1 + volume_id: 9db5e329-cc68-11eb-b027-0a58ac144f91 + +- name: Delete a Volume snapshot + community.digitalocean.digital_ocean_snapshot: + state: absent + snapshot_type: volume + snapshot_id: a902cdba-cc68-11eb-a701-0a58ac145708 +""" + + +RETURN = r""" +data: + description: Snapshot creation or deletion action.
+ returned: success + type: dict + sample: + - completed_at: '2021-06-14T12:36:00Z' + id: 1229119156 + region: + available: true + features: + - backups + - ipv6 + - metadata + - install_agent + - storage + - image_transfer + name: New York 1 + sizes: + - s-1vcpu-1gb + - s-1vcpu-1gb-amd + - s-1vcpu-1gb-intel + - <snip> + slug: nyc1 + region_slug: nyc1 + resource_id: 250445117 + resource_type: droplet + started_at: '2021-06-14T12:35:25Z' + status: completed + type: snapshot + - created_at: '2021-06-14T12:55:10Z' + id: c06d4a86-cd0f-11eb-b13c-0a58ac145472 + min_disk_size: 1 + name: my-snapshot-1 + regions: + - nbg1 + resource_id: f0adea7e-cd0d-11eb-b027-0a58ac144f91 + resource_type: volume + size_gigabytes: 0 + tags: + - tag1 + - tag2 +""" + + +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOSnapshot(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + self.wait = self.module.params.pop("wait", True) + self.wait_timeout = self.module.params.pop("wait_timeout", 120) + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.snapshot_type = module.params["snapshot_type"] + self.snapshot_name = module.params["snapshot_name"] + self.snapshot_tags = module.params["snapshot_tags"] + self.snapshot_id = module.params["snapshot_id"] + self.volume_id = module.params["volume_id"] + + def wait_finished(self): + current_time = time.monotonic() + end_time = current_time + self.wait_timeout + while current_time < end_time: + response = self.rest.get("actions/{0}".format(str(self.action_id))) + status = response.status_code + if status != 200: + self.module.fail_json( + msg="Unable to find action {0}, please file a bug".format( + str(self.action_id) + ) + ) + json = response.json + if json["action"]["status"] == "completed": + return json + time.sleep(10) + self.module.fail_json( + msg="Timed out waiting for snapshot, action {0}".format(str(self.action_id)) + ) + + def create(self): + if self.module.check_mode: + return self.module.exit_json(changed=True) + + if self.snapshot_type == "droplet": + droplet_id = self.module.params["droplet_id"] + data = { + "type": "snapshot", + } + if self.snapshot_name is not None: + data["name"] = self.snapshot_name + response = self.rest.post( + "droplets/{0}/actions".format(str(droplet_id)), data=data + ) + status = response.status_code + json = response.json + if status == 201: + self.action_id = json["action"]["id"] + if self.wait: + json = self.wait_finished() + self.module.exit_json( + changed=True, + msg="Created snapshot, action {0}".format(self.action_id), + data=json["action"], + ) + self.module.exit_json( + changed=True, + msg="Created snapshot, action {0}".format(self.action_id), + data=json["action"], + ) + else: + self.module.fail_json( + changed=False, + msg="Failed to create snapshot: {0}".format(json["message"]), + ) + elif self.snapshot_type == "volume": + data = { + "name": self.snapshot_name, + "tags": self.snapshot_tags, + } + response = self.rest.post( + "volumes/{0}/snapshots".format(str(self.volume_id)), data=data + ) + status = response.status_code + json = response.json + if status == 201: + self.module.exit_json( + changed=True, + msg="Created snapshot, snapshot {0}".format(json["snapshot"]["id"]), + data=json["snapshot"], + ) + else: + self.module.fail_json( + changed=False, + msg="Failed to create 
snapshot: {0}".format(json["message"]), + ) + + def delete(self): + if self.module.check_mode: + return self.module.exit_json(changed=True) + + response = self.rest.delete("snapshots/{0}".format(str(self.snapshot_id))) + status = response.status_code + if status == 204: + self.module.exit_json( + changed=True, + msg="Deleted snapshot {0}".format(str(self.snapshot_id)), + ) + else: + json = response.json + self.module.fail_json( + changed=False, + msg="Failed to delete snapshot {0}: {1}".format( + self.snapshot_id, json["message"] + ), + ) + + +def run(module): + state = module.params.pop("state") + snapshot = DOSnapshot(module) + if state == "present": + snapshot.create() + elif state == "absent": + snapshot.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + snapshot_type=dict( + type="str", required=False, choices=["droplet", "volume"], default="droplet" + ), + snapshot_name=dict(type="str"), + snapshot_tags=dict(type="list", elements="str", default=[]), + droplet_id=dict(type="str"), + volume_id=dict(type="str"), + snapshot_id=dict(type="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=120, type="int"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ["state", "present", ["droplet_id", "volume_id"], True], + ["state", "absent", ["snapshot_id"]], + ], + mutually_exclusive=[["droplet_id", "volume_id"]], + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_facts.py new file mode 100644 index 00000000..19d3c77d --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_facts.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_snapshot_info +short_description: Gather information about DigitalOcean Snapshot +description: + - This module can be used to gather information about snapshot information based upon provided values such as droplet, volume and snapshot id. + - This module was called C(digital_ocean_snapshot_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + snapshot_type: + description: + - Specifies the type of snapshot information to be retrieved. + - If set to C(droplet), then information are gathered related to snapshots based on Droplets only. + - If set to C(volume), then information are gathered related to snapshots based on volumes only. + - If set to C(by_id), then information are gathered related to snapshots based on snapshot id only. + - If not set to any of the above, then information are gathered related to all snapshots. + default: 'all' + choices: [ 'all', 'droplet', 'volume', 'by_id'] + required: false + type: str + snapshot_id: + description: + - To retrieve information about a snapshot, please specify this as a snapshot id. 
+ - If set to actual snapshot id, then information are gathered related to that particular snapshot only. + - This is required parameter, if C(snapshot_type) is set to C(by_id). + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: all + oauth_token: "{{ oauth_token }}" + +- name: Gather information about droplet snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: droplet + oauth_token: "{{ oauth_token }}" + +- name: Gather information about volume snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: volume + oauth_token: "{{ oauth_token }}" + +- name: Gather information about snapshot by snapshot id + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: by_id + snapshot_id: 123123123 + oauth_token: "{{ oauth_token }}" + +- name: Get information about snapshot named big-data-snapshot1 + community.digitalocean.digital_ocean_snapshot_info: + register: resp_out +- set_fact: + snapshot_id: "{{ item.id }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='big-data-snapshot1']" +- debug: + var: snapshot_id + +""" + + +RETURN = r""" +data: + description: DigitalOcean snapshot information + returned: success + elements: dict + type: list + sample: [ + { + "id": "4f60fc64-85d1-11e6-a004-000f53315871", + "name": "big-data-snapshot1", + "regions": [ + "nyc1" + ], + "created_at": "2016-09-28T23:14:30Z", + "resource_id": "89bcc42f-85cf-11e6-a004-000f53315871", + "resource_type": "volume", + "min_disk_size": 10, + "size_gigabytes": 0 + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + snapshot_type = module.params["snapshot_type"] + + rest = DigitalOceanHelper(module) + + base_url = "snapshots" + snapshot = [] + + if snapshot_type == "by_id": + base_url += "/{0}".format(module.params.get("snapshot_id")) + response = rest.get(base_url) + status_code = response.status_code + + if status_code != 200: + module.fail_json( + msg="Failed to fetch snapshot information due to error : %s" + % response.json["message"] + ) + + snapshot.extend(response.json["snapshots"]) + else: + if snapshot_type == "droplet": + base_url += "?resource_type=droplet&" + elif snapshot_type == "volume": + base_url += "?resource_type=volume&" + else: + base_url += "?" 
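+        # Any resource_type filter built above narrows the results server-side;
+        # the trailing "?" or "&" leaves room for the paging parameters that
+        # get_paginated_data() appends.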
+ + snapshot = rest.get_paginated_data(base_url=base_url, data_key_name="snapshots") + module.exit_json(changed=False, data=snapshot) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + snapshot_type=dict( + type="str", + required=False, + choices=["all", "droplet", "volume", "by_id"], + default="all", + ), + snapshot_id=dict(type="str", required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ["snapshot_type", "by_id", ["snapshot_id"]], + ], + ) + if module._name in ( + "digital_ocean_snapshot_facts", + "community.digitalocean.digital_ocean_snapshot_facts", + ): + module.deprecate( + "The 'digital_ocean_snapshot_facts' module has been renamed to 'digital_ocean_snapshot_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_info.py new file mode 100644 index 00000000..19d3c77d --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_snapshot_info.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_snapshot_info +short_description: Gather information about DigitalOcean Snapshot +description: + - This module can be used to gather information about snapshot information based upon provided values such as droplet, volume and snapshot id. + - This module was called C(digital_ocean_snapshot_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + snapshot_type: + description: + - Specifies the type of snapshot information to be retrieved. + - If set to C(droplet), then information are gathered related to snapshots based on Droplets only. + - If set to C(volume), then information are gathered related to snapshots based on volumes only. + - If set to C(by_id), then information are gathered related to snapshots based on snapshot id only. + - If not set to any of the above, then information are gathered related to all snapshots. + default: 'all' + choices: [ 'all', 'droplet', 'volume', 'by_id'] + required: false + type: str + snapshot_id: + description: + - To retrieve information about a snapshot, please specify this as a snapshot id. + - If set to actual snapshot id, then information are gathered related to that particular snapshot only. + - This is required parameter, if C(snapshot_type) is set to C(by_id). 
+ required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: all + oauth_token: "{{ oauth_token }}" + +- name: Gather information about droplet snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: droplet + oauth_token: "{{ oauth_token }}" + +- name: Gather information about volume snapshots + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: volume + oauth_token: "{{ oauth_token }}" + +- name: Gather information about snapshot by snapshot id + community.digitalocean.digital_ocean_snapshot_info: + snapshot_type: by_id + snapshot_id: 123123123 + oauth_token: "{{ oauth_token }}" + +- name: Get information about snapshot named big-data-snapshot1 + community.digitalocean.digital_ocean_snapshot_info: + register: resp_out +- set_fact: + snapshot_id: "{{ item.id }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='big-data-snapshot1']" +- debug: + var: snapshot_id + +""" + + +RETURN = r""" +data: + description: DigitalOcean snapshot information + returned: success + elements: dict + type: list + sample: [ + { + "id": "4f60fc64-85d1-11e6-a004-000f53315871", + "name": "big-data-snapshot1", + "regions": [ + "nyc1" + ], + "created_at": "2016-09-28T23:14:30Z", + "resource_id": "89bcc42f-85cf-11e6-a004-000f53315871", + "resource_type": "volume", + "min_disk_size": 10, + "size_gigabytes": 0 + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + snapshot_type = module.params["snapshot_type"] + + rest = DigitalOceanHelper(module) + + base_url = "snapshots" + snapshot = [] + + if snapshot_type == "by_id": + base_url += "/{0}".format(module.params.get("snapshot_id")) + response = rest.get(base_url) + status_code = response.status_code + + if status_code != 200: + module.fail_json( + msg="Failed to fetch snapshot information due to error : %s" + % response.json["message"] + ) + + snapshot.extend(response.json["snapshots"]) + else: + if snapshot_type == "droplet": + base_url += "?resource_type=droplet&" + elif snapshot_type == "volume": + base_url += "?resource_type=volume&" + else: + base_url += "?" 
+ + snapshot = rest.get_paginated_data(base_url=base_url, data_key_name="snapshots") + module.exit_json(changed=False, data=snapshot) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + snapshot_type=dict( + type="str", + required=False, + choices=["all", "droplet", "volume", "by_id"], + default="all", + ), + snapshot_id=dict(type="str", required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ["snapshot_type", "by_id", ["snapshot_id"]], + ], + ) + if module._name in ( + "digital_ocean_snapshot_facts", + "community.digitalocean.digital_ocean_snapshot_facts", + ): + module.deprecate( + "The 'digital_ocean_snapshot_facts' module has been renamed to 'digital_ocean_snapshot_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces.py new file mode 100644 index 00000000..0d101b50 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_spaces +short_description: Create and remove DigitalOcean Spaces. +description: + - Create and remove DigitalOcean Spaces. +author: Mark Mercado (@mamercad) +version_added: 1.15.0 +options: + state: + description: + - Whether the Space should be present or absent. + default: present + type: str + choices: ["present", "absent"] + name: + description: + - The name of the Spaces to create or delete. + required: true + type: str + region: + description: + - The region to create or delete the Space in. + aliases: ["region_id"] + required: true + type: str + aws_access_key_id: + description: + - The AWS_ACCESS_KEY_ID to use. + required: true + type: str + aliases: ["AWS_ACCESS_KEY_ID"] + aws_secret_access_key: + description: + - The AWS_SECRET_ACCESS_KEY to use. 
+ required: true + type: str + aliases: ["AWS_SECRET_ACCESS_KEY"] +requirements: + - boto3 +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Create a Space in nyc3 + community.digitalocean.digital_ocean_spaces: + state: present + name: my-space + region: nyc3 + +- name: Delete a Space in nyc3 + community.digitalocean.digital_ocean_spaces: + state: absent + name: my-space + region: nyc3 +""" + + +RETURN = r""" +data: + description: DigitalOcean Space + returned: present + type: dict + sample: + space: + endpoint_url: https://nyc3.digitaloceanspaces.com + name: gh-ci-space-1 + region: nyc3 + space_url: https://gh-ci-space-1.nyc3.digitaloceanspaces.com +msg: + description: Informational message + returned: always + type: str + sample: Created Space gh-ci-space-1 in nyc3 +""" + +from ansible.module_utils.basic import ( + AnsibleModule, + missing_required_lib, + env_fallback, + to_native, +) +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from traceback import format_exc + +try: + import boto3 + + HAS_BOTO3 = True +except Exception: + HAS_BOTO3 = False + + +def run(module): + state = module.params.get("state") + name = module.params.get("name") + region = module.params.get("region") + aws_access_key_id = module.params.get("aws_access_key_id") + aws_secret_access_key = module.params.get("aws_secret_access_key") + + try: + session = boto3.session.Session() + client = session.client( + "s3", + region_name=region, + endpoint_url=f"https://{region}.digitaloceanspaces.com", + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + ) + response = client.list_buckets() + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + response_metadata = response.get("ResponseMetadata") + http_status_code = response_metadata.get("HTTPStatusCode") + + if http_status_code == 200: + spaces = [ + { + "name": space["Name"], + "region": region, + "endpoint_url": f"https://{region}.digitaloceanspaces.com", + "space_url": f"https://{space['Name']}.{region}.digitaloceanspaces.com", + } + for space in response["Buckets"] + ] + else: + module.fail_json(changed=False, msg=f"Failed to list Spaces in {region}") + + if state == "present": + for space in spaces: + if space["name"] == name: + module.exit_json(changed=False, data={"space": space}) + + if module.check_mode: + module.exit_json(changed=True, msg=f"Would create Space {name} in {region}") + + try: + response = client.create_bucket(Bucket=name) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + response_metadata = response.get("ResponseMetadata") + http_status_code = response_metadata.get("HTTPStatusCode") + if http_status_code == 200: + module.exit_json( + changed=True, + msg=f"Created Space {name} in {region}", + data={ + "space": { + "name": name, + "region": region, + "endpoint_url": f"https://{region}.digitaloceanspaces.com", + "space_url": f"https://{name}.{region}.digitaloceanspaces.com", + } + }, + ) + + module.fail_json( + changed=False, msg=f"Failed to create Space {name} in {region}" + ) + + elif state == "absent": + have_it = False + for space in spaces: + if space["name"] == name: + have_it = True + + if module.check_mode: + if have_it: + module.exit_json( + changed=True, msg=f"Would delete Space {name} in {region}" + ) + else: + module.exit_json(changed=False, msg=f"No Space {name} in {region}") + + if have_it: + 
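+            # Note (added): deletion goes through the S3-compatible DeleteBucket call
+            # below; as with S3 buckets, a Space generally needs to be empty for the
+            # call to succeed.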
try:
+                response = client.delete_bucket(Bucket=name)
+            except Exception as e:
+                module.fail_json(msg=to_native(e), exception=format_exc())
+
+            response_metadata = response.get("ResponseMetadata")
+            http_status_code = response_metadata.get("HTTPStatusCode")
+            if http_status_code == 200:
+                module.exit_json(changed=True, msg=f"Deleted Space {name} in {region}")
+
+            module.fail_json(
+                changed=True, msg=f"Failed to delete Space {name} in {region}"
+            )
+
+        module.exit_json(changed=False, msg=f"No Space {name} in {region}")
+
+
+def main():
+
+    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+    argument_spec.update(
+        state=dict(type="str", choices=["present", "absent"], default="present"),
+        name=dict(type="str", required=True),
+        region=dict(type="str", aliases=["region_id"], required=True),
+        aws_access_key_id=dict(
+            type="str",
+            aliases=["AWS_ACCESS_KEY_ID"],
+            fallback=(env_fallback, ["AWS_ACCESS_KEY_ID"]),
+            required=True,
+            no_log=True,
+        ),
+        aws_secret_access_key=dict(
+            type="str",
+            aliases=["AWS_SECRET_ACCESS_KEY"],
+            fallback=(env_fallback, ["AWS_SECRET_ACCESS_KEY"]),
+            required=True,
+            no_log=True,
+        ),
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg=missing_required_lib("boto3"))
+
+    run(module)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces_info.py
new file mode 100644
index 00000000..50f05002
--- /dev/null
+++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_spaces_info.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: digital_ocean_spaces_info
+short_description: List DigitalOcean Spaces.
+description:
+  - List DigitalOcean Spaces.
+author: Mark Mercado (@mamercad)
+version_added: 1.15.0
+options:
+  state:
+    description:
+      - Only present is supported.
+    default: present
+    type: str
+    choices: ["present"]
+  region:
+    description:
+      - The region from which to list Spaces.
+    aliases: ["region_id"]
+    required: true
+    type: str
+  aws_access_key_id:
+    description:
+      - The AWS_ACCESS_KEY_ID to use.
+    required: true
+    type: str
+    aliases: ["AWS_ACCESS_KEY_ID"]
+  aws_secret_access_key:
+    description:
+      - The AWS_SECRET_ACCESS_KEY to use.
+ required: true + type: str + aliases: ["AWS_SECRET_ACCESS_KEY"] +requirements: + - boto3 +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: List all Spaces in nyc3 + community.digitalocean.digital_ocean_spaces_info: + state: present + region: nyc3 +""" + + +RETURN = r""" +data: + description: List of DigitalOcean Spaces + returned: always + type: dict + sample: + spaces: + - endpoint_url: https://nyc3.digitaloceanspaces.com + name: gh-ci-space + region: nyc3 + space_url: https://gh-ci-space.nyc3.digitaloceanspaces.com +""" + +from ansible.module_utils.basic import ( + AnsibleModule, + missing_required_lib, + env_fallback, + to_native, +) +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from traceback import format_exc + +try: + import boto3 + + HAS_BOTO3 = True +except Exception: + HAS_BOTO3 = False + + +def run(module): + state = module.params.get("state") + region = module.params.get("region") + aws_access_key_id = module.params.get("aws_access_key_id") + aws_secret_access_key = module.params.get("aws_secret_access_key") + + if state == "present": + try: + session = boto3.session.Session() + client = session.client( + "s3", + region_name=region, + endpoint_url=f"https://{region}.digitaloceanspaces.com", + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + ) + response = client.list_buckets() + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + response_metadata = response.get("ResponseMetadata") + http_status_code = response_metadata.get("HTTPStatusCode") + + if http_status_code == 200: + spaces = [ + { + "name": space["Name"], + "region": region, + "endpoint_url": f"https://{region}.digitaloceanspaces.com", + "space_url": f"https://{space['Name']}.{region}.digitaloceanspaces.com", + } + for space in response["Buckets"] + ] + module.exit_json(changed=False, data={"spaces": spaces}) + + module.fail_json(changed=False, msg=f"Failed to list Spaces in {region}") + + +def main(): + + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(type="str", choices=["present"], default="present"), + region=dict(type="str", aliases=["region_id"], required=True), + aws_access_key_id=dict( + type="str", + aliases=["AWS_ACCESS_KEY_ID"], + fallback=(env_fallback, ["AWS_ACCESS_KEY_ID"]), + required=True, + no_log=True, + ), + aws_secret_access_key=dict( + type="str", + aliases=["AWS_SECRET_ACCESS_KEY"], + fallback=(env_fallback, ["AWS_SECRET_ACCESS_KEY"]), + required=True, + no_log=True, + ), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_BOTO3: + module.fail_json(msg=missing_required_lib("boto3")) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey.py new file mode 100644 index 00000000..3a7e662b --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey.py @@ -0,0 +1,282 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: 
digital_ocean_sshkey +short_description: Manage DigitalOcean SSH keys +description: + - Create/delete DigitalOcean SSH keys. +author: "Patrick Marques (@pmarques)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + fingerprint: + description: + - This is a unique identifier for the SSH key used to delete a key + aliases: ['id'] + type: str + name: + description: + - The name for the SSH key + type: str + ssh_pub_key: + description: + - The Public SSH key to add. + type: str +notes: + - Version 2 of DigitalOcean API is used. +requirements: + - "python >= 2.6" +extends_documentation_fragment: + - community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: "Create ssh key" + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ oauth_token }}" + name: "My SSH Public Key" + ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example" + state: present + register: result + +- name: "Delete ssh key" + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ oauth_token }}" + state: "absent" + fingerprint: "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa" +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "ssh_key": { + "id": 512189, + "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", + "name": "My SSH Public Key", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example" + } + } +""" + +import json +import hashlib +import base64 + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class Response(object): + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(self.info["body"]) + return None + try: + return json.loads(self.body) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + +class Rest(object): + def __init__(self, module, headers): + self.module = module + self.headers = headers + self.baseurl = module.params.get("baseurl") + + def _url_builder(self, path): + if path[0] == "/": + path = path[1:] + return "%s/%s" % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + timeout = self.module.params["timeout"] + + resp, info = fetch_url( + self.module, + url, + data=data, + headers=self.headers, + method=method, + timeout=timeout, + ) + + # Exceptions in fetch_url may result in a status -1, the ensures a + if info["status"] == -1: + self.module.fail_json(msg=info["msg"]) + + return Response(resp, info) + + def get(self, path, data=None, headers=None): + return self.send("GET", path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send("PUT", path, data, headers) + + def post(self, path, data=None, headers=None): + return 
self.send("POST", path, data, headers)
+
+    def delete(self, path, data=None, headers=None):
+        return self.send("DELETE", path, data, headers)
+
+
+def core(module):
+    api_token = module.params["oauth_token"]
+    state = module.params["state"]
+    fingerprint = module.params["fingerprint"]
+    name = module.params["name"]
+    ssh_pub_key = module.params["ssh_pub_key"]
+
+    rest = Rest(
+        module,
+        {
+            "Authorization": "Bearer {0}".format(api_token),
+            "Content-type": "application/json",
+        },
+    )
+
+    fingerprint = fingerprint or ssh_key_fingerprint(module, ssh_pub_key)
+    response = rest.get("account/keys/{0}".format(fingerprint))
+    status_code = response.status_code
+    json = response.json
+
+    if status_code not in (200, 404):
+        module.fail_json(
+            msg="Error getting ssh key [{0}: {1}]".format(
+                status_code, response.json["message"]
+            ),
+            fingerprint=fingerprint,
+        )
+
+    if state == "present":
+        if status_code == 404:
+            # Key was not found, create it
+
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            payload = {"name": name, "public_key": ssh_pub_key}
+            response = rest.post("account/keys", data=payload)
+            status_code = response.status_code
+            json = response.json
+            if status_code == 201:
+                module.exit_json(changed=True, data=json)
+
+            module.fail_json(
+                msg="Error creating ssh key [{0}: {1}]".format(
+                    status_code, response.json["message"]
+                )
+            )
+
+        elif status_code == 200:
+            # Key was found, check whether the name needs to be updated
+            if name is None or json["ssh_key"]["name"] == name:
+                module.exit_json(changed=False, data=json)
+
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            payload = {
+                "name": name,
+            }
+            response = rest.put("account/keys/{0}".format(fingerprint), data=payload)
+            status_code = response.status_code
+            json = response.json
+            if status_code == 200:
+                module.exit_json(changed=True, data=json)
+
+            module.fail_json(
+                msg="Error updating ssh key name [{0}: {1}]".format(
+                    status_code, response.json["message"]
+                ),
+                fingerprint=fingerprint,
+            )
+
+    elif state == "absent":
+        if status_code == 404:
+            module.exit_json(changed=False)
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+
+        response = rest.delete("account/keys/{0}".format(fingerprint))
+        status_code = response.status_code
+        json = response.json
+        if status_code == 204:
+            module.exit_json(changed=True)
+
+        module.fail_json(
+            msg="Error deleting ssh key [{0}: {1}]".format(
+                status_code, response.json["message"]
+            )
+        )
+
+
+def ssh_key_fingerprint(module, ssh_pub_key):
+    try:
+        key = ssh_pub_key.split(None, 2)[1]
+        fingerprint = hashlib.md5(base64.b64decode(key)).hexdigest()
+        return ":".join(a + b for a, b in zip(fingerprint[::2], fingerprint[1::2]))
+    except IndexError:
+        module.fail_json(
+            msg="This does not appear to be a valid public key. Please verify the format and value provided in ssh_pub_key."
+ ) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + fingerprint=dict(aliases=["id"], required=False), + name=dict(required=False), + ssh_pub_key=dict(required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(("fingerprint", "ssh_pub_key"),), + supports_check_mode=True, + ) + + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_facts.py new file mode 100644 index 00000000..cfc61310 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_facts.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_sshkey_info +short_description: Gather information about DigitalOcean SSH keys +description: + - This module can be used to gather information about DigitalOcean SSH keys. + - This module replaces the C(digital_ocean_sshkey_facts) module. +author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Gather information about DigitalOcean SSH keys + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ my_do_key }}" + register: ssh_keys + +- name: Set facts based on the gathered information + set_fact: + pubkey: "{{ item.public_key }}" + loop: "{{ ssh_keys.data | community.general.json_query(ssh_pubkey) }}" + vars: + ssh_pubkey: "[?name=='ansible_ctrl']" + +- name: Print SSH public key + debug: + msg: "{{ pubkey }}" +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_keys +data: + description: List of SSH keys on DigitalOcean + returned: success and no resource constraint + type: list + elements: dict + sample: [ + { + "id": 512189, + "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + "name": "My SSH Public Key" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account/keys") + status_code = response.status_code + json = response.json + if status_code == 200: + module.exit_json(changed=False, data=json["ssh_keys"]) + else: + module.fail_json( + msg="Error fetching SSH Key information [{0}: {1}]".format( + status_code, response.json["message"] + ) + ) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(), + supports_check_mode=True, + ) + if module._name in ( + "digital_ocean_sshkey_facts", + "community.digitalocean.digital_ocean_sshkey_facts", + ): + module.deprecate( + "The 'digital_ocean_sshkey_facts' module has been 
renamed to 'digital_ocean_sshkey_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_info.py new file mode 100644 index 00000000..cfc61310 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_sshkey_info.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_sshkey_info +short_description: Gather information about DigitalOcean SSH keys +description: + - This module can be used to gather information about DigitalOcean SSH keys. + - This module replaces the C(digital_ocean_sshkey_facts) module. +author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Gather information about DigitalOcean SSH keys + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ my_do_key }}" + register: ssh_keys + +- name: Set facts based on the gathered information + set_fact: + pubkey: "{{ item.public_key }}" + loop: "{{ ssh_keys.data | community.general.json_query(ssh_pubkey) }}" + vars: + ssh_pubkey: "[?name=='ansible_ctrl']" + +- name: Print SSH public key + debug: + msg: "{{ pubkey }}" +""" + + +RETURN = r""" +# Digital Ocean API info https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_keys +data: + description: List of SSH keys on DigitalOcean + returned: success and no resource constraint + type: list + elements: dict + sample: [ + { + "id": 512189, + "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + "name": "My SSH Public Key" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account/keys") + status_code = response.status_code + json = response.json + if status_code == 200: + module.exit_json(changed=False, data=json["ssh_keys"]) + else: + module.fail_json( + msg="Error fetching SSH Key information [{0}: {1}]".format( + status_code, response.json["message"] + ) + ) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(), + supports_check_mode=True, + ) + if module._name in ( + "digital_ocean_sshkey_facts", + "community.digitalocean.digital_ocean_sshkey_facts", + ): + module.deprecate( + "The 'digital_ocean_sshkey_facts' module has been renamed to 'digital_ocean_sshkey_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + core(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag.py 
b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag.py new file mode 100644 index 00000000..083129a9 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_tag +short_description: Create and remove tag(s) to DigitalOcean resource. +description: + - Create and remove tag(s) to DigitalOcean resource. +author: "Victor Volle (@kontrafiktion)" +options: + name: + description: + - The name of the tag. The supported characters for names include + alphanumeric characters, dashes, and underscores. + required: true + type: str + resource_id: + description: + - The ID of the resource to operate on. + - The data type of resource_id is changed from integer to string since Ansible 2.5. + aliases: ['droplet_id'] + type: str + resource_type: + description: + - The type of resource to operate on. Currently, only tagging of + droplets is supported. + default: droplet + choices: ['droplet'] + type: str + state: + description: + - Whether the tag should be present or absent on the resource. + default: present + type: str + choices: ['present', 'absent'] +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. + They both refer to the v2 token. + - As of Ansible 2.0, Version 2 of the DigitalOcean API is used. + +requirements: + - "python >= 2.6" +""" + + +EXAMPLES = r""" +- name: Create a tag + community.digitalocean.digital_ocean_tag: + name: production + state: present + +- name: Tag a resource; creating the tag if it does not exist + community.digitalocean.digital_ocean_tag: + name: "{{ item }}" + resource_id: "73333005" + state: present + loop: + - staging + - dbserver + +- name: Untag a resource + community.digitalocean.digital_ocean_tag: + name: staging + resource_id: "73333005" + state: absent + +# Deleting a tag also untags all the resources that have previously been +# tagged with it +- name: Remove a tag + community.digitalocean.digital_ocean_tag: + name: dbserver + state: absent +""" + + +RETURN = r""" +data: + description: a DigitalOcean Tag resource + returned: success and no resource constraint + type: dict + sample: { + "tag": { + "name": "awesome", + "resources": { + "droplets": { + "count": 0, + "last_tagged": null + } + } + } + } +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + state = module.params["state"] + name = module.params["name"] + resource_id = module.params["resource_id"] + resource_type = module.params["resource_type"] + + rest = DigitalOceanHelper(module) + response = rest.get("tags/{0}".format(name)) + + if state == "present": + status_code = response.status_code + resp_json = response.json + changed = False + if status_code == 200 and resp_json["tag"]["name"] == name: + changed = False + else: + # Ensure Tag exists + response = rest.post("tags", data={"name": name}) + status_code = response.status_code + resp_json = response.json + if 
status_code == 201: + changed = True + elif status_code == 422: + changed = False + else: + module.exit_json(changed=False, data=resp_json) + + if resource_id is None: + # No resource defined, we're done. + module.exit_json(changed=changed, data=resp_json) + else: + # Check if resource is already tagged or not + found = False + url = "{0}?tag_name={1}".format(resource_type, name) + if resource_type == "droplet": + url = "droplets?tag_name={0}".format(name) + response = rest.get(url) + status_code = response.status_code + resp_json = response.json + if status_code == 200: + for resource in resp_json["droplets"]: + if not found and resource["id"] == int(resource_id): + found = True + break + if not found: + # If resource is not tagged, tag a resource + url = "tags/{0}/resources".format(name) + payload = { + "resources": [ + {"resource_id": resource_id, "resource_type": resource_type} + ] + } + response = rest.post(url, data=payload) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json( + msg="error tagging resource '{0}': {1}".format( + resource_id, response.json["message"] + ) + ) + else: + # Already tagged resource + module.exit_json(changed=False) + else: + # Unable to find resource specified by user + module.fail_json(msg=resp_json["message"]) + + elif state == "absent": + if response.status_code == 200: + if resource_id: + url = "tags/{0}/resources".format(name) + payload = { + "resources": [ + {"resource_id": resource_id, "resource_type": resource_type} + ] + } + response = rest.delete(url, data=payload) + else: + url = "tags/{0}".format(name) + response = rest.delete(url) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type="str", required=True), + resource_id=dict(aliases=["droplet_id"], type="str"), + resource_type=dict(choices=["droplet"], default="droplet"), + state=dict(choices=["present", "absent"], default="present"), + ) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_facts.py new file mode 100644 index 00000000..43ccaa9c --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_facts.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_tag_info +short_description: Gather information about DigitalOcean tags +description: + - This module can be used to gather information about DigitalOcean provided tags. + - This module was called C(digital_ocean_tag_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + tag_name: + description: + - Tag name that can be used to identify and reference a tag. 
+ required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all tags + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about tag with given name + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + tag_name: "extra_awesome_tag" + +- name: Get resources from tag name + community.digitalocean.digital_ocean_tag_info: + register: resp_out +- set_fact: + resources: "{{ item.resources }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='extra_awesome_tag']" +- debug: + var: resources +""" + + +RETURN = r""" +data: + description: DigitalOcean tag information + returned: success + type: list + elements: dict + sample: [ + { + "name": "extra-awesome", + "resources": { + "droplets": { + "count": 1, + ... + } + } + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + tag_name = module.params.get("tag_name", None) + rest = DigitalOceanHelper(module) + + base_url = "tags" + if tag_name is not None: + response = rest.get("%s/%s" % (base_url, tag_name)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve tags for DigitalOcean") + + tag = [response.json["tag"]] + else: + tag = rest.get_paginated_data(base_url=base_url + "?", data_key_name="tags") + + module.exit_json(changed=False, data=tag) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + tag_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_tag_facts", + "community.digitalocean.digital_ocean_tag_facts", + ): + module.deprecate( + "The 'digital_ocean_tag_facts' module has been renamed to 'digital_ocean_tag_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_info.py new file mode 100644 index 00000000..43ccaa9c --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_tag_info.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_tag_info +short_description: Gather information about DigitalOcean tags +description: + - This module can be used to gather information about DigitalOcean provided tags. + - This module was called C(digital_ocean_tag_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" +options: + tag_name: + description: + - Tag name that can be used to identify and reference a tag. + required: false + type: str +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all tags + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about tag with given name + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + tag_name: "extra_awesome_tag" + +- name: Get resources from tag name + community.digitalocean.digital_ocean_tag_info: + register: resp_out +- set_fact: + resources: "{{ item.resources }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='extra_awesome_tag']" +- debug: + var: resources +""" + + +RETURN = r""" +data: + description: DigitalOcean tag information + returned: success + type: list + elements: dict + sample: [ + { + "name": "extra-awesome", + "resources": { + "droplets": { + "count": 1, + ... + } + } + }, + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + tag_name = module.params.get("tag_name", None) + rest = DigitalOceanHelper(module) + + base_url = "tags" + if tag_name is not None: + response = rest.get("%s/%s" % (base_url, tag_name)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve tags for DigitalOcean") + + tag = [response.json["tag"]] + else: + tag = rest.get_paginated_data(base_url=base_url + "?", data_key_name="tags") + + module.exit_json(changed=False, data=tag) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + tag_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_tag_facts", + "community.digitalocean.digital_ocean_tag_facts", + ): + module.deprecate( + "The 'digital_ocean_tag_facts' module has been renamed to 'digital_ocean_tag_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_facts.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_facts.py new file mode 100644 index 00000000..4e2cc179 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_facts.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_volume_info +short_description: Gather information about DigitalOcean volumes +description: + - This module can be used to gather information about DigitalOcean provided volumes. 
+ - This module was called C(digital_ocean_volume_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + region_name: + description: + - Name of region to restrict results to volumes available in a specific region. + - Please use M(community.digitalocean.digital_ocean_region_info) for getting valid values related regions. + required: false + type: str +requirements: + - "python >= 2.6" + +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all volume + community.digitalocean.digital_ocean_volume_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about volume in given region + community.digitalocean.digital_ocean_volume_info: + region_name: nyc1 + oauth_token: "{{ oauth_token }}" + +- name: Get information about volume named nyc3-test-volume + community.digitalocean.digital_ocean_volume_info: + register: resp_out +- set_fact: + volume_id: "{{ item.id }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='nyc3-test-volume']" +- debug: var=volume_id +""" + + +RETURN = r""" +data: + description: DigitalOcean volume information + returned: success + type: list + sample: [ + { + "id": "506f78a4-e098-11e5-ad9f-000f53306ae1", + "region": { + "name": "New York 1", + "slug": "nyc1", + "sizes": [ + "s-1vcpu-1gb", + "s-1vcpu-2gb", + "s-1vcpu-3gb", + "s-2vcpu-2gb", + "s-3vcpu-1gb", + "s-2vcpu-4gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "s-8vcpu-32gb", + "s-12vcpu-48gb", + "s-16vcpu-64gb", + "s-20vcpu-96gb", + "s-24vcpu-128gb", + "s-32vcpu-192gb" + ], + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata" + ], + "available": true + }, + "droplet_ids": [ + + ], + "name": "example", + "description": "Block store for examples", + "size_gigabytes": 10, + "created_at": "2016-03-02T17:00:49Z" + } + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + region_name = module.params.get("region_name", None) + + rest = DigitalOceanHelper(module) + + base_url = "volumes?" 
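+    # Note (added): if a region was requested it is passed straight through as the
+    # API's "region" query parameter; otherwise every volume in the account is
+    # listed. The trailing "?"/"&" leaves room for the pagination helper to append
+    # its own page parameters.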
+ if region_name is not None: + base_url += "region=%s&" % region_name + + volumes = rest.get_paginated_data(base_url=base_url, data_key_name="volumes") + + module.exit_json(changed=False, data=volumes) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + region_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_volume_facts", + "community.digitalocean.digital_ocean_volume_facts", + ): + module.deprecate( + "The 'digital_ocean_volume_facts' module has been renamed to 'digital_ocean_volume_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_info.py new file mode 100644 index 00000000..4e2cc179 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_volume_info.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_volume_info +short_description: Gather information about DigitalOcean volumes +description: + - This module can be used to gather information about DigitalOcean provided volumes. + - This module was called C(digital_ocean_volume_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + region_name: + description: + - Name of region to restrict results to volumes available in a specific region. + - Please use M(community.digitalocean.digital_ocean_region_info) for getting valid values related regions. 
+ required: false + type: str +requirements: + - "python >= 2.6" + +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Gather information about all volume + community.digitalocean.digital_ocean_volume_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about volume in given region + community.digitalocean.digital_ocean_volume_info: + region_name: nyc1 + oauth_token: "{{ oauth_token }}" + +- name: Get information about volume named nyc3-test-volume + community.digitalocean.digital_ocean_volume_info: + register: resp_out +- set_fact: + volume_id: "{{ item.id }}" + loop: "{{ resp_out.data | community.general.json_query(name) }}" + vars: + name: "[?name=='nyc3-test-volume']" +- debug: var=volume_id +""" + + +RETURN = r""" +data: + description: DigitalOcean volume information + returned: success + type: list + sample: [ + { + "id": "506f78a4-e098-11e5-ad9f-000f53306ae1", + "region": { + "name": "New York 1", + "slug": "nyc1", + "sizes": [ + "s-1vcpu-1gb", + "s-1vcpu-2gb", + "s-1vcpu-3gb", + "s-2vcpu-2gb", + "s-3vcpu-1gb", + "s-2vcpu-4gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "s-8vcpu-32gb", + "s-12vcpu-48gb", + "s-16vcpu-64gb", + "s-20vcpu-96gb", + "s-24vcpu-128gb", + "s-32vcpu-192gb" + ], + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata" + ], + "available": true + }, + "droplet_ids": [ + + ], + "name": "example", + "description": "Block store for examples", + "size_gigabytes": 10, + "created_at": "2016-03-02T17:00:49Z" + } + ] +""" + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) +from ansible.module_utils._text import to_native + + +def core(module): + region_name = module.params.get("region_name", None) + + rest = DigitalOceanHelper(module) + + base_url = "volumes?" 
+ if region_name is not None: + base_url += "region=%s&" % region_name + + volumes = rest.get_paginated_data(base_url=base_url, data_key_name="volumes") + + module.exit_json(changed=False, data=volumes) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + region_name=dict(type="str", required=False), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name in ( + "digital_ocean_volume_facts", + "community.digitalocean.digital_ocean_volume_facts", + ): + module.deprecate( + "The 'digital_ocean_volume_facts' module has been renamed to 'digital_ocean_volume_info'", + version="2.0.0", + collection_name="community.digitalocean", + ) # was Ansible 2.13 + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc.py new file mode 100644 index 00000000..598ec2bf --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_vpc +short_description: Create and delete DigitalOcean VPCs +version_added: 1.7.0 +description: + - This module can be used to create and delete DigitalOcean VPCs. +author: "Mark Mercado (@mamercad)" +options: + state: + description: + - Whether the VPC should be present (created) or absent (deleted). + default: present + choices: + - present + - absent + type: str + name: + description: + - The name of the VPC. + - Must be unique and contain alphanumeric characters, dashes, and periods only. + type: str + required: true + description: + description: + - A free-form text field for describing the VPC's purpose. + - It may be a maximum of 255 characters. + type: str + default: + description: + - A boolean value indicating whether or not the VPC is the default network for the region. + - All applicable resources are placed into the default VPC network unless otherwise specified during their creation. + - The C(default) field cannot be unset from C(true). + - If you want to set a new default VPC network, update the C(default) field of another VPC network in the same region. + - The previous network's C(default) field will be set to C(false) when a new default VPC has been defined. + type: bool + default: false + region: + description: + - The slug identifier for the region where the VPC will be created. + type: str + ip_range: + description: + - The requested range of IP addresses for the VPC in CIDR notation. + - Network ranges cannot overlap with other networks in the same account and must be in range of private addresses as defined in RFC1918. + - It may not be smaller than /24 nor larger than /16. + - If no IP range is specified, a /20 network range is generated that won't conflict with other VPC networks in your account. 
+ type: str +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation + +""" + + +EXAMPLES = r""" +- name: Create a VPC + community.digitalocean.digital_ocean_vpc: + state: present + name: myvpc1 + region: nyc1 + +- name: Create a VPC (choose IP range) + community.digitalocean.digital_ocean_vpc: + state: present + name: myvpc1 + region: nyc1 + ip_range: 192.168.192.0/24 + +- name: Update a VPC (make it default) + community.digitalocean.digital_ocean_vpc: + state: present + name: myvpc1 + region: nyc1 + default: true + +- name: Update a VPC (change description) + community.digitalocean.digital_ocean_vpc: + state: present + name: myvpc1 + region: nyc1 + description: myvpc + +- name: Delete a VPC + community.digitalocean.digital_ocean_vpc: + state: absent + name: myvpc1 +""" + + +RETURN = r""" +data: + description: A DigitalOcean VPC. + returned: success + type: dict + sample: + msg: Created VPC myvpc1 in nyc1 + vpc: + created_at: '2021-06-17T11:43:12.12121565Z' + default: false + description: '' + id: a3b72d97-192f-4984-9d71-08a5faf2e0c7 + ip_range: 10.116.16.0/20 + name: testvpc1 + region: nyc1 + urn: do:vpc:a3b72d97-192f-4984-9d71-08a5faf2e0c7 +""" + + +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOVPC(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.name = module.params.get("name", None) + self.description = module.params.get("description", None) + self.default = module.params.get("default", False) + self.region = module.params.get("region", None) + self.ip_range = module.params.get("ip_range", None) + self.vpc_id = module.params.get("vpc_id", None) + + def get_by_name(self): + page = 1 + while page is not None: + response = self.rest.get("vpcs?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + for vpc in json_data["vpcs"]: + if vpc.get("name", None) == self.name: + return vpc + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def create(self): + if self.module.check_mode: + return self.module.exit_json(changed=True) + + vpc = self.get_by_name() + if vpc is not None: # update + vpc_id = vpc.get("id", None) + if vpc_id is not None: + data = { + "name": self.name, + } + if self.description is not None: + data["description"] = self.description + if self.default is not False: + data["default"] = True + response = self.rest.put("vpcs/{0}".format(vpc_id), data=data) + json = response.json + if response.status_code != 200: + self.module.fail_json( + msg="Failed to update VPC {0} in {1}: {2}".format( + self.name, self.region, json["message"] + ) + ) + else: + self.module.exit_json( + changed=False, + data=json, + msg="Updated VPC {0} in {1}".format(self.name, self.region), + ) + else: + self.module.fail_json( + changed=False, msg="Unexpected error, please file a bug" + ) + + else: # create + data = { + "name": self.name, + "region": self.region, + } + if self.description is not None: + data["description"] = self.description + if self.ip_range is not None: + data["ip_range"] = self.ip_range + + response = self.rest.post("vpcs", data=data) + status = response.status_code + json = response.json + if status == 
201: + self.module.exit_json( + changed=True, + data=json, + msg="Created VPC {0} in {1}".format(self.name, self.region), + ) + else: + self.module.fail_json( + changed=False, + msg="Failed to create VPC {0} in {1}: {2}".format( + self.name, self.region, json["message"] + ), + ) + + def delete(self): + if self.module.check_mode: + return self.module.exit_json(changed=True) + + vpc = self.get_by_name() + if vpc is None: + self.module.fail_json( + msg="Unable to find VPC {0} in {1}".format(self.name, self.region) + ) + else: + vpc_id = vpc.get("id", None) + if vpc_id is not None: + response = self.rest.delete("vpcs/{0}".format(str(vpc_id))) + status = response.status_code + json = response.json + if status == 204: + self.module.exit_json( + changed=True, + msg="Deleted VPC {0} in {1} ({2})".format( + self.name, self.region, vpc_id + ), + ) + else: + json = response.json + self.module.fail_json( + changed=False, + msg="Failed to delete VPC {0} ({1}): {2}".format( + self.name, vpc_id, json["message"] + ), + ) + + +def run(module): + state = module.params.pop("state") + vpc = DOVPC(module) + if state == "present": + vpc.create() + elif state == "absent": + vpc.delete() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=["present", "absent"], default="present"), + name=dict(type="str", required=True), + description=dict(type="str"), + default=dict(type="bool", default=False), + region=dict(type="str"), + ip_range=dict(type="str"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ["state", "present", ["name", "region"]], + ["state", "absent", ["name"]], + ], + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc_info.py b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc_info.py new file mode 100644 index 00000000..c2b11078 --- /dev/null +++ b/ansible_collections/community/digitalocean/plugins/modules/digital_ocean_vpc_info.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2021, Mark Mercado <mamercad@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: digital_ocean_vpc_info +short_description: Gather information about DigitalOcean VPCs +version_added: 1.7.0 +description: + - This module can be used to gather information about DigitalOcean VPCs. +author: "Mark Mercado (@mamercad)" +options: + members: + description: + - Return VPC members (instead of all VPCs). + type: bool + default: False + name: + description: + - The name of the VPC. + type: str +extends_documentation_fragment: +- community.digitalocean.digital_ocean.documentation +""" + + +EXAMPLES = r""" +- name: Fetch all VPCs + community.digitalocean.digital_ocean_vpc_info: + register: my_vpcs + +- name: Fetch members of a VPC + community.digitalocean.digital_ocean_vpc_info: + members: true + name: myvpc1 + register: my_vpc_members +""" + + +RETURN = r""" +data: + description: All DigitalOcean VPCs, or, members of a VPC (with C(members=True)). 
+ returned: success + type: dict + sample: + - created_at: '2021-02-06T17:57:22Z' + default: true + description: '' + id: 0db3519b-9efc-414a-8868-8f2e6934688c + ip_range: 10.116.0.0/20 + name: default-nyc1 + region: nyc1 + urn: do:vpc:0db3519b-9efc-414a-8868-8f2e6934688c + - links: {} + members: [] + meta: + total: 0 +""" + + +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.digitalocean.plugins.module_utils.digital_ocean import ( + DigitalOceanHelper, +) + + +class DOVPCInfo(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + # pop the oauth token so we don't include it in the POST data + self.module.params.pop("oauth_token") + self.name = self.module.params.pop("name", "") + self.members = self.module.params.pop("members", False) + + def get_by_name(self): + page = 1 + while page is not None: + response = self.rest.get("vpcs?page={0}".format(page)) + json_data = response.json + if response.status_code == 200: + for vpc in json_data["vpcs"]: + if vpc.get("name", None) == self.name: + return vpc + if ( + "links" in json_data + and "pages" in json_data["links"] + and "next" in json_data["links"]["pages"] + ): + page += 1 + else: + page = None + return None + + def get(self): + if self.module.check_mode: + return self.module.exit_json(changed=False) + + if not self.members: + base_url = "vpcs?" + vpcs = self.rest.get_paginated_data(base_url=base_url, data_key_name="vpcs") + self.module.exit_json(changed=False, data=vpcs) + else: + vpc = self.get_by_name() + if vpc is not None: + vpc_id = vpc.get("id", None) + if vpc_id is not None: + response = self.rest.get("vpcs/{0}/members".format(vpc_id)) + json = response.json + if response.status_code != 200: + self.module.fail_json( + msg="Failed to find VPC named {0}: {1}".format( + self.name, json["message"] + ) + ) + else: + self.module.exit_json(changed=False, data=json) + else: + self.module.fail_json( + changed=False, msg="Unexpected error, please file a bug" + ) + else: + self.module.fail_json( + changed=False, + msg="Could not find a VPC named {0}".format(self.name), + ) + + +def run(module): + vpcs = DOVPCInfo(module) + vpcs.get() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + members=dict(type="bool", default=False), + name=dict(type="str"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ["members", True, ["name"]], + ], + supports_check_mode=True, + ) + + run(module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.ini b/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.ini new file mode 100644 index 00000000..b809554b --- /dev/null +++ b/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.ini @@ -0,0 +1,34 @@ +# Ansible DigitalOcean external inventory script settings +# + +[digital_ocean] + +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY +# +#api_token = 123456abcdefg + + +# API calls to DigitalOcean may be slow. For this reason, we cache the results +# of an API call. Set this to the path you want cache files to be written to. +# One file will be written to this directory: +# - ansible-digital_ocean.cache +# +cache_path = /tmp + + +# The number of seconds a cache file is considered valid. 
After this many +# seconds, a new API call will be made, and the cache file will be updated. +# +cache_max_age = 300 + +# Use the private network IP address instead of the public when available. +# +use_private_network = False + +# Pass variables to every group, e.g.: +# +# group_variables = { 'ansible_user': 'root' } +# +group_variables = {} diff --git a/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.py b/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.py new file mode 100755 index 00000000..1d5685c2 --- /dev/null +++ b/ansible_collections/community/digitalocean/scripts/inventory/digital_ocean.py @@ -0,0 +1,622 @@ +#!/usr/bin/env python + +""" +DigitalOcean external inventory script +====================================== + +Generates Ansible inventory of DigitalOcean Droplets. + +In addition to the --list and --host options used by Ansible, there are options +for generating JSON of other DigitalOcean data. This is useful when creating +droplets. For example, --regions will return all the DigitalOcean Regions. +This information can also be easily found in the cache file, whose default +location is /tmp/ansible-digital_ocean.cache. + +The --pretty (-p) option pretty-prints the output for better human readability. + +---- +Although the cache stores all the information received from DigitalOcean, +the cache is not used for current droplet information (in --list, --host, +--all, and --droplets). This is so that accurate droplet information is always +found. You can force this script to use the cache with --force-cache. + +---- +Configuration is read from `digital_ocean.ini`, then from environment variables, +and then from command-line arguments. + +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' + +Alternatively, it can be passed on the command-line with --api-token.
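+ +For example (abc123 below is a placeholder, not a real token), the token can +be supplied directly when listing droplets: + ./digital_ocean.py --list --api-token 'abc123'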
+ +If you specify DigitalOcean credentials in the INI file, a handy way to +get them into your environment (e.g., to use the digital_ocean module) +is to use the output of the --env option with export: + export $(digital_ocean.py --env) + +---- +The following groups are generated from --list: + - ID (droplet ID) + - NAME (droplet NAME) + - digital_ocean + - image_ID + - image_NAME + - distro_NAME (distribution NAME from image) + - region_NAME + - size_NAME + - status_STATUS + +For each host, the following variables are registered: + - do_backup_ids + - do_created_at + - do_disk + - do_features - list + - do_id + - do_image - object + - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - do_memory + - do_name + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list + - do_status + - do_tags + - do_vcpus + - do_volume_ids + +----- +``` +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] + [--regions] [--images] [--sizes] [--ssh-keys] + [--domains] [--tags] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] [--force-cache] + [--refresh-cache] [--env] [--api-token API_TOKEN] + +Produce an Ansible Inventory file based on DigitalOcean credentials + +optional arguments: + -h, --help show this help message and exit + --list List all active Droplets as Ansible inventory + (default: True) + --host HOST Get all Ansible inventory variables about the Droplet + with the given ID + --all List all DigitalOcean information as JSON + --droplets, -d List Droplets as JSON + --regions List Regions as JSON + --images List Images as JSON + --sizes List Sizes as JSON + --ssh-keys List SSH keys as JSON + --domains List Domains as JSON + --tags List Tags as JSON + --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) 
+ --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache, -r Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) + --env, -e Display DO_API_TOKEN + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token +``` + +""" + +# (c) 2013, Evan Wies <evan@neomantra.net> +# (c) 2017, Ansible Project +# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com> +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +###################################################################### + +import argparse +import ast +import os +import re +import requests +import sys +from time import time + +try: + import ConfigParser +except ImportError: + import configparser as ConfigParser + +import json + + +class DoManager: + def __init__(self, api_token): + self.api_token = api_token + self.api_endpoint = "https://api.digitalocean.com/v2" + self.headers = { + "Authorization": "Bearer {0}".format(self.api_token), + "Content-type": "application/json", + } + self.timeout = 60 + + def _url_builder(self, path): + if path[0] == "/": + path = path[1:] + return "%s/%s" % (self.api_endpoint, path) + + def send(self, url, method="GET", data=None, params=None): + url = self._url_builder(url) + data = json.dumps(data) + try: + if method == "GET": + resp_data = {} + incomplete = True + while incomplete: + resp = requests.get( + url, + data=data, + params=params, + headers=self.headers, + timeout=self.timeout, + ) + json_resp = resp.json() + + for key, value in json_resp.items(): + if isinstance(value, list) and key in resp_data: + resp_data[key] += value + else: + resp_data[key] = value + + try: + url = json_resp["links"]["pages"]["next"] + except KeyError: + incomplete = False + + except ValueError as e: + sys.exit("Unable to parse result from %s: %s" % (url, e)) + return resp_data + + def all_active_droplets(self, tag_name=None): + if tag_name is not None: + params = {"tag_name": tag_name} + resp = self.send("droplets/", params=params) + else: + resp = self.send("droplets/") + return resp["droplets"] + + def all_regions(self): + resp = self.send("regions/") + return resp["regions"] + + def all_images(self, filter_name="global"): + params = {"filter": filter_name} + resp = self.send("images/", data=params) + return resp["images"] + + def sizes(self): + resp = self.send("sizes/") + return resp["sizes"] + + def all_ssh_keys(self): + resp = self.send("account/keys") + return resp["ssh_keys"] + + def all_domains(self): + resp = self.send("domains/") + return resp["domains"] + + def show_droplet(self, droplet_id): + resp = self.send("droplets/%s" % droplet_id) + return resp.get("droplet", {}) + + def all_tags(self): + resp = self.send("tags") + return resp["tags"] + + +class DigitalOceanInventory(object): + + ########################################################################### + # Main execution path + ########################################################################### + + def __init__(self): + """Main execution path""" + + # DigitalOceanInventory data + self.data = {} # All DigitalOcean data + self.inventory = {} # Ansible Inventory + + # Define defaults + self.cache_path = "." 
+ self.cache_max_age = 0 + self.use_private_network = False + self.group_variables = {} + self.droplets_tag_name = None + + # Read settings, environment variables, and CLI arguments + self.read_settings() + self.read_environment() + self.read_cli_args() + + # Verify credentials were set + if not hasattr(self, "api_token"): + msg = ( + "Could not find values for DigitalOcean api_token. They must be specified via either ini file, " + "command line argument (--api-token), or environment variables (DO_API_TOKEN)\n" + ) + sys.stderr.write(msg) + sys.exit(-1) + + # env command, show DigitalOcean credentials + if self.args.env: + print("DO_API_TOKEN=%s" % self.api_token) + sys.exit(0) + + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid(): + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + sys.stderr.write("Cache is empty and --force-cache was specified\n") + sys.exit(-1) + + self.manager = DoManager(self.api_token) + + # Pick the json_data to print based on the CLI command + if self.args.droplets: + self.load_from_digital_ocean("droplets") + json_data = {"droplets": self.data["droplets"]} + elif self.args.regions: + self.load_from_digital_ocean("regions") + json_data = {"regions": self.data["regions"]} + elif self.args.images: + self.load_from_digital_ocean("images") + json_data = {"images": self.data["images"]} + elif self.args.sizes: + self.load_from_digital_ocean("sizes") + json_data = {"sizes": self.data["sizes"]} + elif self.args.ssh_keys: + self.load_from_digital_ocean("ssh_keys") + json_data = {"ssh_keys": self.data["ssh_keys"]} + elif self.args.domains: + self.load_from_digital_ocean("domains") + json_data = {"domains": self.data["domains"]} + elif self.args.tags: + self.load_from_digital_ocean("tags") + json_data = {"tags": self.data["tags"]} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() + else: # '--list' this is last to make it default + self.load_from_digital_ocean("droplets") + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() + + if self.args.pretty: + print(json.dumps(json_data, indent=2)) + else: + print(json.dumps(json_data)) + + ########################################################################### + # Script configuration + ########################################################################### + + def read_settings(self): + """Reads the settings from the digital_ocean.ini file""" + config = ConfigParser.ConfigParser() + config_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "digital_ocean.ini" + ) + config.read(config_path) + + # Credentials + if config.has_option("digital_ocean", "api_token"): + self.api_token = config.get("digital_ocean", "api_token") + + # Cache related + if config.has_option("digital_ocean", "cache_path"): + self.cache_path = config.get("digital_ocean", "cache_path") + if config.has_option("digital_ocean", "cache_max_age"): + self.cache_max_age = config.getint("digital_ocean", "cache_max_age") + + # Private IP Address + if config.has_option("digital_ocean", "use_private_network"): + self.use_private_network = config.getboolean( + "digital_ocean", "use_private_network" + ) + + # Group variables + if config.has_option("digital_ocean", "group_variables"): + self.group_variables = ast.literal_eval( + config.get("digital_ocean", 
"group_variables") + ) + + # Droplet tag_name + if config.has_option("droplets", "tag_name"): + self.droplets_tag_name = config.get("droplets", "tag_name") + + def read_environment(self): + """Reads the settings from environment variables""" + # Setup credentials + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") + + def read_cli_args(self): + """Command line argument processing""" + parser = argparse.ArgumentParser( + description="Produce an Ansible Inventory file based on DigitalOcean credentials" + ) + + parser.add_argument( + "--list", + action="store_true", + help="List all active Droplets as Ansible inventory (default: True)", + ) + parser.add_argument( + "--host", + action="store", + type=int, + help="Get all Ansible inventory variables about the Droplet with the given ID", + ) + + parser.add_argument( + "--all", + action="store_true", + help="List all DigitalOcean information as JSON", + ) + parser.add_argument( + "--droplets", "-d", action="store_true", help="List Droplets as JSON" + ) + parser.add_argument( + "--regions", action="store_true", help="List Regions as JSON" + ) + parser.add_argument("--images", action="store_true", help="List Images as JSON") + parser.add_argument("--sizes", action="store_true", help="List Sizes as JSON") + parser.add_argument( + "--ssh-keys", action="store_true", help="List SSH keys as JSON" + ) + parser.add_argument( + "--domains", action="store_true", help="List Domains as JSON" + ) + parser.add_argument("--tags", action="store_true", help="List Tags as JSON") + + parser.add_argument( + "--pretty", "-p", action="store_true", help="Pretty-print results" + ) + + parser.add_argument( + "--cache-path", action="store", help="Path to the cache files (default: .)" + ) + parser.add_argument( + "--cache-max_age", + action="store", + help="Maximum age of the cached items (default: 0)", + ) + parser.add_argument( + "--force-cache", + action="store_true", + default=False, + help="Only use data from the cache", + ) + parser.add_argument( + "--refresh-cache", + "-r", + action="store_true", + default=False, + help="Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)", + ) + + parser.add_argument( + "--env", "-e", action="store_true", help="Display DO_API_TOKEN" + ) + parser.add_argument( + "--api-token", "-a", action="store", help="DigitalOcean API Token" + ) + + self.args = parser.parse_args() + + if self.args.api_token: + self.api_token = self.args.api_token + + # Make --list default if none of the other commands are specified + if ( + not self.args.droplets + and not self.args.regions + and not self.args.images + and not self.args.sizes + and not self.args.ssh_keys + and not self.args.domains + and not self.args.tags + and not self.args.all + and not self.args.host + ): + self.args.list = True + + ########################################################################### + # Data Management + ########################################################################### + + def load_from_digital_ocean(self, resource=None): + """Get JSON from DigitalOcean API""" + if self.args.force_cache and os.path.isfile(self.cache_filename): + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource == "droplets" or resource is None): + return + if self.args.refresh_cache: + resource = None + + if resource == "droplets" or resource is None: + self.data["droplets"] = self.manager.all_active_droplets( + 
tag_name=self.droplets_tag_name + ) + self.cache_refreshed = True + if resource == "regions" or resource is None: + self.data["regions"] = self.manager.all_regions() + self.cache_refreshed = True + if resource == "images" or resource is None: + self.data["images"] = self.manager.all_images() + self.cache_refreshed = True + if resource == "sizes" or resource is None: + self.data["sizes"] = self.manager.sizes() + self.cache_refreshed = True + if resource == "ssh_keys" or resource is None: + self.data["ssh_keys"] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == "domains" or resource is None: + self.data["domains"] = self.manager.all_domains() + self.cache_refreshed = True + if resource == "tags" or resource is None: + self.data["tags"] = self.manager.all_tags() + self.cache_refreshed = True + + def add_inventory_group(self, key): + """Method to create group dict""" + host_dict = {"hosts": [], "vars": {}} + self.inventory[key] = host_dict + return + + def add_host(self, group, host): + """Helper method to reduce host duplication""" + if group not in self.inventory: + self.add_inventory_group(group) + + if host not in self.inventory[group]["hosts"]: + self.inventory[group]["hosts"].append(host) + return + + def build_inventory(self): + """Build Ansible inventory of droplets""" + self.inventory = { + "all": {"hosts": [], "vars": self.group_variables}, + "_meta": {"hostvars": {}}, + } + + # add all droplets by id and name + for droplet in self.data["droplets"]: + for net in droplet["networks"]["v4"]: + if net["type"] == "public": + droplet["ip_address"] = net["ip_address"] + elif net["type"] == "private": + droplet["private_ip_address"] = net["ip_address"] + + host_identifier = droplet["ip_address"] + if self.use_private_network and droplet["private_ip_address"]: + host_identifier = droplet["private_ip_address"] + + self.inventory["all"]["hosts"].append(host_identifier) + + self.add_host(droplet["id"], host_identifier) + + self.add_host(droplet["name"], host_identifier) + + # groups that are always present + for group in ( + "digital_ocean", + "region_" + droplet["region"]["slug"], + "image_" + str(droplet["image"]["id"]), + "size_" + droplet["size"]["slug"], + "distro_" + + DigitalOceanInventory.to_safe(droplet["image"]["distribution"]), + "status_" + droplet["status"], + ): + self.add_host(group, host_identifier) + + # groups that are not always present + for group in (droplet["image"]["slug"], droplet["image"]["name"]): + if group: + image = "image_" + DigitalOceanInventory.to_safe(group) + self.add_host(image, host_identifier) + + if droplet["tags"]: + for tag in droplet["tags"]: + self.add_host(tag, host_identifier) + + # hostvars + info = self.do_namespace(droplet) + self.inventory["_meta"]["hostvars"][host_identifier] = info + + def load_droplet_variables_for_host(self): + """Generate a JSON response to a --host call""" + droplet = self.manager.show_droplet(self.args.host) + info = self.do_namespace(droplet) + return info + + ########################################################################### + # Cache Management + ########################################################################### + + def is_cache_valid(self): + """Determines if the cache file has expired, or if it is still valid""" + if os.path.isfile(self.cache_filename): + mod_time = os.path.getmtime(self.cache_filename) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + return False + + def load_from_cache(self): + """Reads the data from
the cache file and assigns it to member variables as Python Objects""" + try: + with open(self.cache_filename, "r") as cache: + json_data = cache.read() + data = json.loads(json_data) + except IOError: + data = {"data": {}, "inventory": {}} + + self.data = data["data"] + self.inventory = data["inventory"] + + def write_to_cache(self): + """Writes data in JSON format to a file""" + data = {"data": self.data, "inventory": self.inventory} + json_data = json.dumps(data, indent=2) + + with open(self.cache_filename, "w") as cache: + cache.write(json_data) + + ########################################################################### + # Utilities + ########################################################################### + @staticmethod + def to_safe(word): + """Converts 'bad' characters in a string to underscores so they can be used as Ansible groups""" + return re.sub(r"[^A-Za-z0-9\-.]", "_", word) + + @staticmethod + def do_namespace(data): + """Returns a copy of the dictionary with all the keys put in a 'do_' namespace""" + info = {} + for k, v in data.items(): + info["do_" + k] = v + return info + + +########################################################################### +# Run the script +DigitalOceanInventory() diff --git a/ansible_collections/community/digitalocean/tests/integration/integration_config.yml.template b/ansible_collections/community/digitalocean/tests/integration/integration_config.yml.template new file mode 100644 index 00000000..d742bfd2 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/integration_config.yml.template @@ -0,0 +1,4 @@ +--- +do_api_key: ${DO_API_KEY} +aws_access_key_id: ${AWS_ACCESS_KEY_ID} +aws_secret_access_key: ${AWS_SECRET_ACCESS_KEY} diff --git a/ansible_collections/community/digitalocean/tests/integration/requirements.txt b/ansible_collections/community/digitalocean/tests/integration/requirements.txt new file mode 100644 index 00000000..1a82e9a3 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/requirements.txt @@ -0,0 +1,2 @@ +jmespath +boto3 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/tasks/main.yml new file mode 100644 index 00000000..95f7cbf9 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_account_info/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Query account information + community.digitalocean.digital_ocean_account_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify account information idempotency and format + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.droplet_limit is defined + - result.data.droplet_limit > 0 + - result.data.email is defined + - 
result.data.email_verified is defined + - result.data.floating_ip_limit is defined + - result.data.floating_ip_limit > 0 + - result.data.status == "active" + - result.data.status_message is defined + - result.data.uuid is defined + - result.data.volume_limit > 0 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/tasks/main.yml new file mode 100644 index 00000000..669d43b0 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_balance_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather balance information + community.digitalocean.digital_ocean_balance_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify balance info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/defaults/main.yml new file mode 100644 index 00000000..0e52c2cb --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/defaults/main.yml @@ -0,0 +1,7 @@ +do_region: nyc1 +volume_name: gh-ci-volume +volume_size: 15 +volume_down_size: 10 +volume_up_size: 20 +timeout: 900 +project_name: gh-ci-project diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/tasks/main.yml new file mode 100644 index 00000000..5d24d062 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_block_storage/tasks/main.yml @@ -0,0 +1,138 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in 
tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure the volume is absent + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify volume is absent + ansible.builtin.assert: + that: + - not result.changed + + - name: Create a volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_size }}" + register: result + + - name: Verify volume is present + ansible.builtin.assert: + that: + - result.changed + + - name: Resize to same size + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_size }}" + register: result + + - name: Verify resizing to same size is ignored + ansible.builtin.assert: + that: + - not result.changed + + - name: Shrink the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_down_size }}" + ignore_errors: true + register: result + + - name: Verify shrinking fails + ansible.builtin.assert: + that: + - result.failed + + - name: Grow the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_up_size }}" + timeout: "{{ timeout }}" + register: result + + - name: Verify growing volume succeeded + ansible.builtin.assert: + that: + - result.changed + + - name: Remove the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify the volume is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Create a volume (and associate with Project) + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_size }}" + project: "{{ project_name }}" + register: result + + - name: Verify volume is present + ansible.builtin.assert: + that: + - result.changed + - result.assign_status is defined + - result.assign_status == "assigned" + - result.msg is defined + - "'Assigned do:volume' in result.msg" + - result.resources is defined + - result.resources.status is defined + - result.resources.status == "assigned" + + always: + + - name: Remove the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify the volume is deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/aliases new file mode 100644 index 
00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/defaults/main.yml new file mode 100644 index 00000000..17810a67 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/defaults/main.yml @@ -0,0 +1,2 @@ +endpoint: ansible-gh-ci-space-0.nyc3.cdn.digitaloceanspaces.com +origin: ansible-gh-ci-space-0.nyc3.digitaloceanspaces.com diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/tasks/main.yml new file mode 100644 index 00000000..e1f5af51 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints/tasks/main.yml @@ -0,0 +1,67 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create DigitalOcean CDN Endpoint + community.digitalocean.digital_ocean_cdn_endpoints: + state: present + oauth_token: "{{ do_api_key }}" + origin: "{{ origin }}" + register: result + + - name: Verify CDN Endpoints created + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.endpoint is defined + - result.data.endpoint.endpoint == endpoint + - result.data.endpoint.origin == origin + - result.data.endpoint.ttl == 3600 + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Update DigitalOcean CDN Endpoint (change ttl to 600, default is 3600) + community.digitalocean.digital_ocean_cdn_endpoints: + state: present + oauth_token: "{{ do_api_key }}" + origin: "{{ origin }}" + ttl: 600 + register: result + + - name: Verify CDN Endpoints updated + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.endpoint is defined + - result.data.endpoint.endpoint == endpoint + - result.data.endpoint.origin == origin + - result.data.endpoint.ttl == 600 + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + always: + + - name: Delete DigitalOcean CDN Endpoint + community.digitalocean.digital_ocean_cdn_endpoints: + state: absent + oauth_token: "{{ do_api_key }}" + origin: "{{ origin }}" + register: result + + - name: Verify CDN Endpoints deleted + ansible.builtin.assert: + that: + - result.changed + - "'Deleted CDN Endpoint' in result.msg " diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/defaults/main.yml new file 
mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/tasks/main.yml new file mode 100644 index 00000000..ff29d779 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_cdn_endpoints_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather CDN Endpoints information + community.digitalocean.digital_ocean_cdn_endpoints_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify CDN Endpoints fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/defaults/main.yml new file mode 100644 index 00000000..60055b0b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/defaults/main.yml @@ -0,0 +1,3 @@ +cert_name: gh-ci-cert +cert_key: "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDurdsTUxVhbokw\nWddnOTsst0GniShyldeEhGZZM8lO1qSedzeIlV/Hx5ja/bPvNGq30FoapGSAOfFS\nutRmrUwJc9M9UmgHXawOMoTkW8t4C2Uexzbys1iUpcZpArvqnFx317KBW41ukXzC\n9SSEtIWJE1FlunuaTV+8pmD+/1yb/3xSMp+NPspMLUo+K77uKgfY3cwooxQx5N8C\n0aEYfM2jwyQUM4QMLGJB3Xy5zwEBL10K7ZSS4Rgs2W5BLYNsgx5lFqIzvUPsdEe6\nXw1yFBXoC/inJosV0UNkS+qUAdd36PDsd79jaOtwfNW7Bbv2QnG6Byhl7E7Rzi8L\nOKsXAmMPAgMBAAECggEAKCI70EgMNFHt559zxPuuo5o9A/rgPkRL9RB+VUmL9CHL\nlInfYKwdau55GXnHo9VgjIAQZqJ188nKdmrMBGfnfT9oqcsNiw+JOunrxqpY0Fq6\nKi69YpodrwEhF89J4e1g7xXqAi5u3cTBAddOyLHCMoZ5DW4KEqwoCQTBEp/F5sT/\n6YsYEeLD6qUi8h66XXY4eLS+VHKWv1Eqms09OMJBrxj3Q9jrlmqX+ZDPE/hnkdKM\nPLo4fkSIDZ7rZ8NBEmhwWkH9eeJoZSS9rCXxwdyZ2r4B7ptFU7iYYCilgmXjo3pf\n3WdNn/gbyRvu/9jz2zWYG7Etkq+S/SX4URIvC3xtIQKBgQD/iurZYLvOaRG0GBN1\nHeor7wQ0zi0ej+haizSqS2tZUQUZhtHQCCV/fXsajVMoKuNGnG2Oqos2wWGzuEqH\nk9M9J2lMvXTMM0m1UsP3x+1ViAREzCX+/U81Y1oIuZNhF5prSR2VWtcLRs+OdK+R\npa/uwoDYl0yJrW+kXUZ6s2MmfQKBgQDvGzZEbpsx14+b7V+kIRP0eEVnMS6dOD5C\n9T9MVfRQJSYm3+RdVb9fjsAIq/wXCwcXeHqDJ683sb2VBlzI+sEZZWSsz8UF54IJ\n+YCOvxjR3BWIQsSdN3/ShCB9hQzY13qVEXvq3QrF1mzhh8E34Kkbg86TXVNZbcFg\nbBlmPr6JewKBgQDALjI2rvgu4HfO8DhZ4NiGl+ea6t+l12ZQCdbep3+hGTxzmR/b\nivTgb3cQXe6HC9IG/YerPnkaAc8/gL6UPckkeapIa0cWdw/czNTSWMoVhAhp0PpW\nTRyvsvs4CpBa+YxJWSBlfX6yXVuk7/5F08x/DEooGYkXFZcbjoZ9qsFLZQKBgACX\nTqEIrPWIlOBLy+q58jSuf21r37OfyfecZ5WmuL3h+aAq+YOJjLmNvsfGtQ7Rhij2\nsUbVuEco91EA/QW6KPQJHiCXOuNy2Q1xQnIX4tXR93Ooxpek5Xn9vZ7Rz2BR2Am6\nS1YfScGHcCl2jnG26nQY9U/D8UrNTKcSXVW1Xf81AoGAXA0+CaJXT72fedDkS6sL\nbW+YdgWkLj07jEhCE84PXHa3nlx2SkPtk0nEUJXOuZklt0PWlBUDxQLWncgqkdO7\n4JcbuolfnQVVMc/gKdcrEPNToJ7/VDAqJwC5CGxDhLOF+oR/cjiLRwrzHTdluIu4\n/qL2ac+WzLPvIRyzKSnGkWQ=\n-----END PRIVATE KEY-----" +cert_crt: "-----BEGIN CERTIFICATE-----\nMIID4DCCAsgCCQD9sx3XPtsBxzANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMC\nVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhOZXcgWW9yazEaMBgGA1UE\nCgwRRGlnaXRhbE9jZWFuLCBJbmMxEjAQBgNVBAsMCUNvbW11bml0eTEjMCEGA1UE\nAwwaY29tbXVuaXR5LmRpZ2l0YWxvY2Vhbi5jb20xJzAlBgkqhkiG9w0BCQEWGG5v\ncmVwbHlAZGlnaXRhbG9jZWFuLmNvbTAeFw0yMTA0MTcxNTEwNTBaFw0yMjA0MTcx\nNTEwNTBaMIGxMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNV\nBAcMCE5ldyBZb3JrMRowGAYDVQQKDBFEaWdpdGFsT2NlYW4sIEluYzESMBAGA1UE\nCwwJQ29tbXVuaXR5MSMwIQYDVQQDDBpjb21tdW5pdHkuZGlnaXRhbG9jZWFuLmNv\nbTEnMCUGCSqGSIb3DQEJARYYbm9yZXBseUBkaWdpdGFsb2NlYW4uY29tMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7q3bE1MVYW6JMFnXZzk7LLdBp4ko\ncpXXhIRmWTPJTtaknnc3iJVfx8eY2v2z7zRqt9BaGqRkgDnxUrrUZq1MCXPTPVJo\nB12sDjKE5FvLeAtlHsc28rNYlKXGaQK76pxcd9eygVuNbpF8wvUkhLSFiRNRZbp7\nmk1fvKZg/v9cm/98UjKfjT7KTC1KPiu+7ioH2N3MKKMUMeTfAtGhGHzNo8MkFDOE\nDCxiQd18uc8BAS9dCu2UkuEYLNluQS2DbIMeZRaiM71D7HRHul8NchQV6Av4pyaL\nFdFDZEvqlAHXd+jw7He/Y2jrcHzVuwW79kJxugcoZexO0c4vCzirFwJjDwIDAQAB\nMA0GCSqGSIb3DQEBCwUAA4IBAQBFaoyvk2naUJLTmUjANhbSMqTmOcfZ6fGv0EKu\nm9LJEPMX0CbFWQABvE3/9o6wAlTHc92WLdHhc3hhhIsWpEMIQ7fm9k+RVOJzFAHw\nOEOB3QIrX1t1tEoISlYLr3IbI1lAzOE8PN1A9I8Jtdl1cJ49Bk4eX84P1Zxa2Lqm\nrzQM+J23jmWdVaTehXxjYGMVlmlphBfXlTy8Of5EZGfZPSRf95y3l8I8v+cPDF1Z\nk/2JCDsiG0knAtOJnagKLy/uWdewMeRhL6uzXvijWa/RFGT2aipV3psFc1fvcJI4\nJ8RFsYZPkxQo6f2l3G+GOdOdL34O919TsTYmLdcQ6+pvkwKH\n-----END CERTIFICATE-----" diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/tasks/main.yml new file mode 100644 index 00000000..5deeb3f9 --- /dev/null +++ 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create a certificate + community.digitalocean.digital_ocean_certificate: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ cert_name }}" + private_key: "{{ cert_key }}" + leaf_certificate: "{{ cert_crt }}" + register: result + + - name: Ensure certificate was created + ansible.builtin.assert: + that: + - result.changed + - result.response is defined + - result.response.certificate is defined + - result.response.certificate.id is defined + - result.response.certificate.name is defined + - result.response.certificate.name == cert_name + + - name: Store the certificate id + ansible.builtin.set_fact: + certificate_id: "{{ result.response.certificate.id }}" + + - name: Gather information about the certificate + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ do_api_key }}" + certificate_id: "{{ certificate_id }}" + register: result + + - name: Ensure certificate information was gathered + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data | length == 1 + - result.data.0.name is defined + - result.data.0.name == cert_name + + always: + + - name: Delete a certificate + community.digitalocean.digital_ocean_certificate: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ cert_name }}" + register: result + + - name: Ensure certificate was removed + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/tasks/main.yml new file mode 100644 index 00000000..8b4749f8 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_certificate_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather certificate information + community.digitalocean.digital_ocean_certificate_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify certificate information fetched + ansible.builtin.assert: + that: + - not 
result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/defaults/main.yml new file mode 100644 index 00000000..31c197f7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/defaults/main.yml @@ -0,0 +1,7 @@ +do_region: nyc1 +database_name: gh-ci-database +database_engine: redis +database_size: db-s-1vcpu-1gb +database_num_nodes: 1 +database_wait_timeout: 900 +project_name: gh-ci-project diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/tasks/main.yml new file mode 100644 index 00000000..f14af913 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database/tasks/main.yml @@ -0,0 +1,176 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure database is absent (leftover) + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + ignore_errors: true # In case one was left from previous run + register: result + + - name: Ensure database is absent + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + register: result + + - name: Verify database is absent + ansible.builtin.assert: + that: + - not result.changed + + - name: Create the database + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + wait_timeout: "{{ database_wait_timeout }}" + register: result + + - name: Verify database is present + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.database is defined + - result.data.database.name is defined + - result.data.database.name == database_name + + - name: Create the database (again; idempotency check) + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + wait_timeout: "{{ database_wait_timeout }}" + register: result + + - name: Verify database is present + ansible.builtin.assert: + that: + - not result.changed + + - name: Fetch all databases + community.digitalocean.digital_ocean_database_info: + oauth_token: "{{ do_api_key }}" + register: result + + - 
name: Ensure all databases found + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + + - name: Fetch this database + community.digitalocean.digital_ocean_database_info: + oauth_token: "{{ do_api_key }}" + name: "{{ database_name }}" + register: result + + - name: Ensure all databases found + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.database is defined + - result.data.database.name is defined + - result.data.database.name == database_name + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete the database + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + register: result + + - name: Verify the database is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Create the database (and associate with Project) + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + wait_timeout: "{{ database_wait_timeout }}" + project: "{{ project_name }}" + register: result + + - name: Verify database is present + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.database is defined + - result.data.database.name is defined + - result.data.database.name == database_name + - result.assign_status is defined + - result.assign_status == "assigned" + - result.msg is defined + - "'Assigned do:dbaas' in result.msg" + - result.resources is defined + - result.resources.status is defined + - result.resources.status == "assigned" + + always: + + - name: Delete the database + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + register: result + + - name: Verify the database is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Delete the database (again; idempotency check) + community.digitalocean.digital_ocean_database: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ database_name }}" + region: "{{ do_region }}" + engine: "{{ database_engine }}" + size: "{{ database_size }}" + register: result + + - name: Verify the database is deleted + ansible.builtin.assert: + that: + - not result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/defaults/main.yml diff --git 
a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/tasks/main.yml new file mode 100644 index 00000000..e526cef1 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_database_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Fetch all databases + community.digitalocean.digital_ocean_database_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Ensure all databases found + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/defaults/main.yml new file mode 100644 index 00000000..12e61b60 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/defaults/main.yml @@ -0,0 +1,2 @@ +domain_name: 0bb54d769d27ae8b4d16c8abddb7c03b767b4fa2.com +project_name: gh-ci-project diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/tasks/main.yml new file mode 100644 index 00000000..25bf7c0b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain/tasks/main.yml @@ -0,0 +1,124 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create the domain + community.digitalocean.digital_ocean_domain: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ domain_name }}" + ip: 127.0.0.1 + register: result + + - name: Ensure domain was created + ansible.builtin.assert: + that: + - result.changed + - result.domain is defined + - result.domain.name is defined + - result.domain.name == domain_name + + - name: Fetch domain information + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ do_api_key }}" + domain_name: "{{ domain_name }}" + register: result + + - name: Ensure domain was found + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data | length == 1 + + - name: Create A record for domain + community.digitalocean.digital_ocean_domain_record: + oauth_token: "{{ do_api_key }}" + state: present + domain: "{{ domain_name }}" + type: A + name: www + data: 127.0.0.1 + register: result + + - name: Ensure domain record was created + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.result.name == "www" + - result.result.data == 
"127.0.0.1" + + - name: Create AAAA record for domain + community.digitalocean.digital_ocean_domain_record: + oauth_token: "{{ do_api_key }}" + state: present + domain: "{{ domain_name }}" + type: AAAA + name: www2 + data: 0000:0000:0000:0000:0000:0000:0000:0001 + register: result + + - name: Ensure domain record was created + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.result.name == "www2" + - result.result.data == "0000:0000:0000:0000:0000:0000:0000:0001" + + - name: Delete the domain + community.digitalocean.digital_ocean_domain: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ domain_name }}" + register: result + + - name: Ensure domain was deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Create the domain (and assign to Project) + community.digitalocean.digital_ocean_domain: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ domain_name }}" + ip: 127.0.0.1 + project: "{{ project_name }}" + register: result + + - name: Ensure domain was created + ansible.builtin.assert: + that: + - result.changed + - result.domain is defined + - result.domain.name is defined + - result.domain.name == domain_name + - result.assign_status is defined + - result.assign_status == "assigned" + - result.msg is defined + - "'Assigned do:domain' in result.msg" + - result.resources is defined + - result.resources.status is defined + - result.resources.status == "assigned" + + always: + + - name: Delete the domain + community.digitalocean.digital_ocean_domain: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ domain_name }}" + register: result + + - name: Ensure domain was deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/tasks/main.yml new file mode 100644 index 00000000..314442d8 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather domain information + community.digitalocean.digital_ocean_domain_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git 
a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/defaults/main.yml new file mode 100644 index 00000000..7faa7ae7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/defaults/main.yml @@ -0,0 +1,8 @@ +domain: a692934f7a1ffbe552dd.net +record_id: 296972271 +record_data: ns3.digitalocean.com +non_existent_domain: a692934f7a1ffbe552ddxxxxxxxxxx.net +non_existent_record_id: 29697227101010101010 +cname_record_id: 297696356 # cname foo +cname_record_data: bar.a692934f7a1ffbe552dd.net +cname_record_type: CNAME diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/tasks/main.yml new file mode 100644 index 00000000..4b76b5f4 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_domain_record_info/tasks/main.yml @@ -0,0 +1,107 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Get domain records for existing domain + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ domain }}" + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.records is defined + - result.data.records | type_debug == "list" + + - name: Get domain records for non-existent domain + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ non_existent_domain }}" + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is not defined + - result.msg is defined + - result.msg is search("Error getting domain records") + + - name: Get NS domain records for existing domain + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ domain }}" + type: NS + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.records is defined + - result.data.records | type_debug == "list" + - result.data.records | community.general.json_query('[*].type') | unique | length == 1 + - result.data.records | community.general.json_query('[*].type') | unique | first == "NS" + + - name: Get specific domain record by ID + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ domain }}" + record_id: "{{ record_id }}" + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + 
- not result.changed + - not result.failed + - result.data is defined + - result.data.records is defined + - result.data.records | length == 1 + - result.data.records.0.id == record_id + - result.data.records.0.data == record_data + + - name: Get non-existent domain record by ID + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ domain }}" + record_id: "{{ non_existent_record_id }}" + register: result + + - name: Verify domain information fetched + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + - result.data is defined + - result.data.records is defined + - result.data.records | length == 0 + + - name: Get specific domain record by ID (CNAME) + community.digitalocean.digital_ocean_domain_record_info: + oauth_token: "{{ do_api_key }}" + domain: "{{ domain }}" + record_id: "{{ cname_record_id }}" + register: result + + - name: Verify domain information fetched (CNAME) + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + - result.data is defined + - result.data.records is defined + - result.data.records | length == 1 + - result.data.records.0.id == cname_record_id + - result.data.records.0.data == cname_record_data + - result.data.records.0.type == cname_record_type diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/defaults/main.yml new file mode 100644 index 00000000..312f53e8 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/defaults/main.yml @@ -0,0 +1,14 @@ +do_region: nyc1 +droplet_name: gh-ci-droplet +droplet_image: ubuntu-18-04-x64 +droplet_size: s-1vcpu-1gb +droplet_new_size: s-1vcpu-2gb +project_name: gh-ci-project +firewall_name: gh-ci-firewall +firewall_inbound_rules: + - protocol: "tcp" + ports: "9999" + sources: + addresses: ["0.0.0.0/0", "::/0"] +firewall_outbound_rules: [] +secondary_project_name: test-project diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/tasks/main.yml new file mode 100644 index 00000000..79177be2 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet/tasks/main.yml @@ -0,0 +1,511 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure Droplet is absent (leftover) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # In case one was left from previous run + register: result + + - name: Ensure Droplet is absent + 
community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: result + + - name: Verify Droplet is absent + ansible.builtin.assert: + that: + - not result.changed + + - name: Create the Droplet (invalid region) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + region: xyz1 + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # Expected to fail + register: result + + - name: Verify invalid region fails + ansible.builtin.assert: + that: + - result.msg is search("invalid region") + + - name: Create the Droplet (sleep_interval > wait_timeout) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + sleep_interval: 1000 + wait_timeout: 500 + ignore_errors: true # Expected to fail + register: result + + - name: Verify invalid sleep_interval fails + ansible.builtin.assert: + that: + - result.msg is search("Sleep interval") + - result.msg is search("should be less") + + - name: Create the Droplet (sleep_interval <= 0) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + sleep_interval: -100 + wait_timeout: 500 + ignore_errors: true # Expected to fail + register: result + + - name: Verify invalid sleep_interval fails + ansible.builtin.assert: + that: + - result.msg is search("Sleep interval") + - result.msg is search("should be greater") + + - name: Create the Droplet (present, not unique_name, and non-default project) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: false + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + project: "{{ secondary_project_name }}" + register: result + + - name: Verify Droplet is present (from present, not unique_name, and non-default project) + ansible.builtin.assert: + that: + - result.changed + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + - result.assign_status == "assigned" + - result.msg is search("Assigned") + - result.msg is search("to project " ~ secondary_project_name) + # issue #220: droplet.networks.v4 is empty when unique_name != true + - result.data.droplet.networks.v4 != [] + + - name: Destroy the Droplet (absent, by ID only) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + id: "{{ result.data.droplet.id }}" + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: result + + - name: Verify Droplet is absent (from absent, by ID only) + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (present, unique_name, and non-default project) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + 
unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + project: "{{ secondary_project_name }}" + register: result + + - name: Verify Droplet is present (from present) + ansible.builtin.assert: + that: + - result.changed + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + - result.assign_status == "assigned" + - result.msg is search("Assigned") + - result.msg is search("to project " ~ secondary_project_name) + + - name: Destroy the Droplet (absent, by unique name and ID) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + id: "{{ result.data.droplet.id }}" + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: result + + - name: Verify Droplet is absent (from absent, by unique name and ID) + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (present, unique_name, default project) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: result + + - name: Verify Droplet is present (from present) + ansible.builtin.assert: + that: + - result.changed + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + + - name: Create the same Droplet again (present, unique_name, default project) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: result + + - name: Verify Droplet is already present (from present, unique_name, default project) + ansible.builtin.assert: + that: + - not result.changed + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + + - name: Destroy the Droplet (absent, by unique name only) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: result + + - name: Verify Droplet is absent (from absent, by unique name only) + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (active) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: result + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Verify Droplet is present (from active) + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.droplet is defined + - result.data.droplet.name is defined + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + + - name: Delete the 
Droplet (absent) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: result + + - name: Verify Droplet is absent (from absent) + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (inactive) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: inactive + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 1500 + register: result + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Verify Droplet is present (and off) + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.droplet is defined + - result.data.droplet.name is defined + - result.data.droplet.name == droplet_name + - result.data.droplet.status == "off" + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Resize the Droplet (and active) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_new_size }}" + wait_timeout: 1500 + register: result + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Verify Droplet is active and resized + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.droplet is defined + - result.data.droplet.name is defined + - result.data.droplet.name == droplet_name + - result.data.droplet.status == "active" + - result.data.droplet.size_slug is defined + - result.data.droplet.size_slug == droplet_new_size + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete the Droplet (always) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + ignore_errors: true # Should this fail, we'll clean it up next run + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (and assign to Project) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + project: "{{ project_name }}" + register: result + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Verify Droplet is present (from active) + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.droplet is defined + - result.data.droplet.name is defined + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + - result.assign_status is defined + - result.assign_status == "assigned" + - result.msg is defined + - "'Assigned do:droplet' in result.msg" + - result.resources is defined + - result.resources.status is defined + - result.resources.status == "assigned" + # Droplet plus firewall tests + - name: Create a 
test firewall + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ do_api_key }}" + name: "{{ firewall_name }}" + state: present + inbound_rules: "{{ firewall_inbound_rules }}" + outbound_rules: "{{ firewall_outbound_rules }}" + droplet_ids: [] + register: testing_firewall + + - name: Verify firewall was created + ansible.builtin.assert: + that: + - testing_firewall is defined + - testing_firewall.changed is true + + - name: Create a new droplet and add to the above firewall + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + firewall: ["{{ firewall_name }}"] + size: "{{ droplet_size }}" + region: "{{ do_region }}" + image: "{{ droplet_image }}" + wait_timeout: 500 + register: firewall_droplet + + - name: Verify the droplet has created and has firewall applied + ansible.builtin.assert: + that: + - firewall_droplet is defined + - firewall_droplet.changed is true + + - name: Check our firewall for the new droplet + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ do_api_key }}" + name: "{{ firewall_name }}" + register: firewall_settings + + - name: Verify details of firewall with droplet + ansible.builtin.assert: + that: + - firewall_settings is defined + - "{{ (firewall_settings.data | map(attribute='droplet_ids'))[0] | length > 0 }}" + - "{{ firewall_droplet.data.droplet.id }} in {{ (firewall_settings.data | map(attribute='droplet_ids'))[0] }}" + + - name: Rerun on above droplet without removing firewall + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + firewall: ["{{ firewall_name }}"] + size: "{{ droplet_size }}" + region: "{{ do_region }}" + image: "{{ droplet_image }}" + wait_timeout: 500 + register: firewall_droplet_unchanged + + - name: Verify things were not changed when firewall was present but unchanged + ansible.builtin.assert: + that: + - firewall_droplet_unchanged is defined + - firewall_droplet_unchanged.changed is false + + - name: Rerun on above droplet and remove firewall + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ droplet_name }}" + unique_name: true + firewall: [] + size: "{{ droplet_size }}" + region: "{{ do_region }}" + image: "{{ droplet_image }}" + wait_timeout: 500 + register: firewall_droplet_removal + + - name: Verify things were changed for firewall removal + ansible.builtin.assert: + that: + - firewall_droplet_removal is defined + - firewall_droplet_removal.changed is true + + - name: Check our firewall for the droplet being removed + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ do_api_key }}" + name: "{{ firewall_name }}" + register: firewall_settings_removal + + - name: Verify details of firewall with droplet + ansible.builtin.assert: + that: + - firewall_settings_removal is defined + - "{{ (firewall_settings_removal.data | map(attribute='droplet_ids'))[0] | length == 0 }}" + - "{{ firewall_droplet.data.droplet.id }} not in {{ firewall_settings_removal.data | map(attribute='droplet_ids') }}" + + always: + + - name: Delete the Firewall (always) + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ do_api_key }}" + name: "{{ firewall_name }}" + state: absent + + - name: Delete the Droplet (always) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + 
unique_name: true + region: "{{ do_region }}" + ignore_errors: true # Should this fail, we'll clean it up next run diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/tasks/main.yml new file mode 100644 index 00000000..e68f9eb5 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_droplet_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather droplet information + community.digitalocean.digital_ocean_droplet_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify droplet info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/defaults/main.yml new file mode 100644 index 00000000..08ce300a --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/defaults/main.yml @@ -0,0 +1,7 @@ +firewall_name: gh-ci-firewall +firewall_inbound_rules: + - protocol: "tcp" + ports: "9999" + sources: + addresses: ["0.0.0.0/0", "::/0"] +firewall_outbound_rules: [] diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/tasks/main.yml new file mode 100644 index 00000000..d125e34e --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create a Firewall + 
community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ firewall_name }}" + inbound_rules: "{{ firewall_inbound_rules }}" + outbound_rules: "{{ firewall_outbound_rules }}" + register: result + + - name: Verify Firewall created + ansible.builtin.assert: + that: + - result.changed + + - name: Find the Firewall + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ do_api_key }}" + name: "{{ firewall_name }}" + register: result + + - name: Verify Firewall found + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data | length == 1 + - result.data.0.name is defined + - result.data.0.name == firewall_name + + always: + + - name: Delete a Firewall + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ firewall_name }}" + register: result + + - name: Verify Firewall deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/tasks/main.yml new file mode 100644 index 00000000..c14d43a9 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_firewall_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather firewall information + community.digitalocean.digital_ocean_firewall_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify firewall information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/defaults/main.yml new file mode 100644 index 00000000..30cbae2d --- /dev/null +++ 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/defaults/main.yml @@ -0,0 +1,5 @@ +do_region: nyc1 +droplet_name: gh-ci-droplet +droplet_image: ubuntu-18-04-x64 +droplet_size: s-1vcpu-1gb +project_name: gh-ci-project diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/tasks/main.yml new file mode 100644 index 00000000..875c1211 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip/tasks/main.yml @@ -0,0 +1,205 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all Floating IPs + community.digitalocean.digital_ocean_floating_ip_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify we retrieved all Floating IPs + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + + - name: Delete existing Floating IPs + community.digitalocean.digital_ocean_floating_ip: + state: absent + ip: "{{ item.ip }}" + oauth_token: "{{ do_api_key }}" + loop: "{{ result.floating_ips }}" + + - name: Create the testing Droplet (active) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: droplet_result + + - name: Verify Droplet is active + ansible.builtin.assert: + that: + - droplet_result.changed + - droplet_result.data is defined + - droplet_result.data.droplet is defined + - droplet_result.data.droplet.name is defined + - droplet_result.data.droplet.name == droplet_name + - droplet_result.data.droplet.status == "active" + + - name: Create a Floating IP and assign to Project + community.digitalocean.digital_ocean_floating_ip: + state: present + region: "{{ do_region }}" + oauth_token: "{{ do_api_key }}" + project: "{{ project_name }}" + register: floating_ip + + - name: Verify that a Floating IP was created (and assigned to Project) + ansible.builtin.assert: + that: + - floating_ip.changed + - floating_ip.assign_status is defined + - floating_ip.assign_status == "assigned" + - floating_ip.msg is defined + - "'Assigned do:floatingip' in floating_ip.msg" + - floating_ip.resources is defined + - floating_ip.resources.status is defined + - floating_ip.resources.status == "assigned" + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete the Floating IP + community.digitalocean.digital_ocean_floating_ip: + state: absent + ip: "{{ floating_ip.data.floating_ip.ip }}" + region: "{{ do_region }}" + oauth_token: "{{ do_api_key }}" + register: result + when: + - floating_ip.data.floating_ip.ip is defined + + - name: Verify that a Floating IP was deleted + ansible.builtin.assert: + that: + - result.changed + when: + - floating_ip.data.floating_ip.ip is defined + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: "Create a Floating IP" + community.digitalocean.digital_ocean_floating_ip: + state: present + region: "{{ do_region }}" + oauth_token: "{{ do_api_key }}" + register: floating_ip + + - name: 
Verify that a Floating IP was created + ansible.builtin.assert: + that: + - floating_ip.changed + + - name: Gather information about all Floating IPs + community.digitalocean.digital_ocean_floating_ip_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify we retrieved all Floating IPs + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + + - name: Find our Floating IP + set_fact: + ci_floating_ip: "{{ result.floating_ips | selectattr('ip', 'equalto', floating_ip.data.floating_ip.ip) }}" + register: result_find + + - name: Verify we found our Floating IP + ansible.builtin.assert: + that: + - not result_find.changed + - not result_find.failed + - result_find.ansible_facts.ci_floating_ip is defined + - result_find.ansible_facts.ci_floating_ip | length == 1 + + - name: Attach the Floating IP + community.digitalocean.digital_ocean_floating_ip: + state: attached + ip: "{{ floating_ip.data.floating_ip.ip }}" + droplet_id: "{{ droplet_result.data.droplet.id }}" + oauth_token: "{{ do_api_key }}" + register: attach_result + when: + - floating_ip.data.floating_ip.ip is defined + - droplet_result.data.droplet.id is defined + + - name: Verify that a Floating IP was attached + ansible.builtin.assert: + that: + - attach_result.changed + when: + - floating_ip.data.floating_ip.ip is defined + - droplet_result.data.droplet.id is defined + - attach_result is defined + + - name: Detach the Floating IP + community.digitalocean.digital_ocean_floating_ip: + state: detached + ip: "{{ floating_ip.data.floating_ip.ip }}" + oauth_token: "{{ do_api_key }}" + register: detach_result + when: + - floating_ip.data.floating_ip.ip is defined + - droplet_result.data.droplet.id is defined + - attach_result.changed + + - name: Verify that a Floating IP was detached + ansible.builtin.assert: + that: + - detach_result.changed + when: + - floating_ip.data.floating_ip.ip is defined + - droplet_result.data.droplet.id is defined + - detach_result is defined + + always: + + - name: Delete the Floating IP + community.digitalocean.digital_ocean_floating_ip: + state: absent + ip: "{{ floating_ip.data.floating_ip.ip }}" + region: "{{ do_region }}" + oauth_token: "{{ do_api_key }}" + register: result + when: + - floating_ip.data.floating_ip.ip is defined + + - name: Verify that a Floating IP was deleted + ansible.builtin.assert: + that: + - result.changed + when: + - floating_ip.data.floating_ip.ip is defined + + - name: Destroy the Droplet (absent) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + register: destroy_result + + - name: Verify Droplet is absent (from absent) + ansible.builtin.assert: + that: + - destroy_result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/defaults/main.yml new file mode 100644 index 
00000000..ae70fbd7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/defaults/main.yml @@ -0,0 +1 @@ +do_region: nyc1 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/tasks/main.yml new file mode 100644 index 00000000..017724dd --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_floating_ip_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all Floating IPs + community.digitalocean.digital_ocean_floating_ip_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify we retrieved all Floating IPs + ansible.builtin.assert: + that: + - not result.changed + - not result.failed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/defaults/main.yml new file mode 100644 index 00000000..dc89a90d --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/defaults/main.yml @@ -0,0 +1 @@ +image_type: distribution diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/tasks/main.yml new file mode 100644 index 00000000..5813b721 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_image_info/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all images + community.digitalocean.digital_ocean_image_info: + oauth_token: "{{ do_api_key }}" + image_type: "{{ image_type }}" + register: result + + - name: Verify image info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/defaults/main.yml 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/defaults/main.yml new file mode 100644 index 00000000..6dd5f742 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/defaults/main.yml @@ -0,0 +1,15 @@ +do_region: nyc1 + +cluster_name: gh-ci-k8s +cluster_version: latest +cluster_node_pools: + - name: gh-ci-k8s-workers + size: s-1vcpu-2gb + count: 1 + +cluster_ha_name: gh-ci-ha-k8s +cluster_ha_version: latest +cluster_ha_node_pools: + - name: gh-ci-k8s-ha-workers + size: s-1vcpu-2gb + count: 3 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/tasks/main.yml new file mode 100644 index 00000000..4d605fa7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes/tasks/main.yml @@ -0,0 +1,229 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure Kubernetes cluster is absent + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ cluster_name }}" + version: "{{ cluster_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_node_pools }}" + return_kubeconfig: false + wait_timeout: 600 + register: result + + - name: Verify Kubernetes cluster is absent + ansible.builtin.assert: + that: + - not result.changed + + - name: Ensure Kubernetes cluster is absent (ha) + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ cluster_ha_name }}" + version: "{{ cluster_ha_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_ha_node_pools }}" + return_kubeconfig: false + wait_timeout: 600 + register: result + + - name: Verify Kubernetes cluster is absent (ha) + ansible.builtin.assert: + that: + - not result.changed + + - name: Gather information about nonexistent Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes_info: + oauth_token: "{{ do_api_key }}" + name: nonexistent-cluster + return_kubeconfig: true + register: result + ignore_errors: true # expected to fail + + - name: Verify nonexistent Kubernetes cluster is failed + ansible.builtin.assert: + that: + - not result.changed + - result.failed + - result.msg == "Kubernetes cluster not found" + + - name: Create the Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ cluster_name }}" + version: "{{ cluster_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_node_pools }}" + return_kubeconfig: false + wait_timeout: 600 + register: result + + - name: Verify Kubernetes cluster is present + ansible.builtin.assert: + that: + - result.changed + - result.data.name == cluster_name + + - name: Gather information about the Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes_info: + oauth_token: "{{ do_api_key }}" + name: "{{ cluster_name }}" + return_kubeconfig: false + register: result + + - name: Verify Kubernetes cluster information is found + ansible.builtin.assert: + that: + - not result.changed + - result.data.name == cluster_name + + - name: Gather information about the Kubernetes 
cluster (with kubeconfig) + community.digitalocean.digital_ocean_kubernetes_info: + oauth_token: "{{ do_api_key }}" + name: "{{ cluster_name }}" + return_kubeconfig: true + register: result + + - name: Verify Kubernetes cluster information is found + ansible.builtin.assert: + that: + - not result.changed + - result.data.name == cluster_name + - result.data.kubeconfig is defined + - result.data.kubeconfig | length > 0 + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete the Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ cluster_name }}" + version: "{{ cluster_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_node_pools }}" + return_kubeconfig: false + wait_timeout: 600 + register: result + + - name: Verify the Kubernetes cluster is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + # FIXME: https://github.com/ansible-collections/community.digitalocean/issues/204 + # @mamercad: I'm asking internally why this isn't done automatically. + - name: Delete the Kubernetes cluster tag + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ do_api_key }}" + state: absent + name: "k8s:{{ result.data.id }}" + + - name: Create the Kubernetes cluster (ha) + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ cluster_ha_name }}" + version: "{{ cluster_ha_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_ha_node_pools }}" + return_kubeconfig: false + wait_timeout: 1200 + ha: true + register: result + + - name: Verify Kubernetes cluster is present (ha) + ansible.builtin.assert: + that: + - result.changed + - result.data.name == cluster_ha_name + - result.data.ha is true + + - name: Gather information about the Kubernetes cluster (ha) + community.digitalocean.digital_ocean_kubernetes_info: + oauth_token: "{{ do_api_key }}" + name: "{{ cluster_ha_name }}" + return_kubeconfig: true + register: result + + - name: Verify Kubernetes cluster information is found (ha) + ansible.builtin.assert: + that: + - not result.changed + - result.data.kubeconfig | length > 0 + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete the Kubernetes cluster + community.digitalocean.digital_ocean_kubernetes: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ cluster_ha_name }}" + version: "{{ cluster_ha_version }}" + region: "{{ do_region }}" + node_pools: "{{ cluster_ha_node_pools }}" + return_kubeconfig: false + wait_timeout: 600 + register: result + + - name: Verify the Kubernetes cluster is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + # FIXME: https://github.com/ansible-collections/community.digitalocean/issues/204 + # @mamercad: I'm asking internally why this isn't done automatically. 
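    # A follow-up check (sketch, not from the upstream diff) that could run after the manual
    # tag cleanup below. It assumes digital_ocean_tag_info returns the account's tags as a
    # list of dicts under "data", each with a "name" field, and reuses the same
    # "k8s:<cluster-id>" naming that the workaround task below relies on.
    - name: List remaining tags (hypothetical verification step)
      community.digitalocean.digital_ocean_tag_info:
        oauth_token: "{{ do_api_key }}"
      register: remaining_tags

    - name: Verify the leftover cluster tag is gone (hypothetical verification step)
      ansible.builtin.assert:
        that:
          - remaining_tags.data | selectattr('name', 'equalto', 'k8s:' ~ result.data.id) | list | length == 0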
+    - name: Delete the Kubernetes cluster tag
+      community.digitalocean.digital_ocean_tag:
+        oauth_token: "{{ do_api_key }}"
+        state: absent
+        name: "k8s:{{ result.data.id }}"
+
+  always:
+
+    - name: Delete the Kubernetes cluster
+      community.digitalocean.digital_ocean_kubernetes:
+        oauth_token: "{{ do_api_key }}"
+        state: absent
+        name: "{{ cluster_name }}"
+        version: "{{ cluster_version }}"
+        region: "{{ do_region }}"
+        node_pools: "{{ cluster_node_pools }}"
+        return_kubeconfig: false
+        wait_timeout: 600
+      ignore_errors: true # Should this fail, we'll clean it up next run
+
+    - name: Delete the Kubernetes cluster (ha)
+      community.digitalocean.digital_ocean_kubernetes:
+        oauth_token: "{{ do_api_key }}"
+        state: absent
+        name: "{{ cluster_ha_name }}"
+        version: "{{ cluster_ha_version }}"
+        region: "{{ do_region }}"
+        node_pools: "{{ cluster_ha_node_pools }}"
+        return_kubeconfig: false
+        wait_timeout: 600
+      ignore_errors: true # Should this fail, we'll clean it up next run
diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/aliases
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/aliases
diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/defaults/main.yml
new file mode 100644
index 00000000..c1f5ba32
--- /dev/null
+++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/defaults/main.yml
@@ -0,0 +1,8 @@
+do_region: nyc1
+
+cluster_name: gh-ci-k8s
+cluster_version: latest
+cluster_node_pools:
+  - name: gh-ci-k8s-workers
+    size: s-1vcpu-2gb
+    count: 1
diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/tasks/main.yml
new file mode 100644
index 00000000..a0787f33
--- /dev/null
+++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_kubernetes_info/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+- block:
+
+    - name: Ensure API key is provided
+      ansible.builtin.fail:
+        msg: do_api_key should be defined in tests/integration/integration_config.yml
+      when:
+        - do_api_key is not defined
+        - do_api_key | length == 0
+
+    - name: Gather information about the Kubernetes cluster
+      community.digitalocean.digital_ocean_kubernetes_info:
+        oauth_token: "{{ do_api_key }}"
+        name: "{{ cluster_name }}"
+        return_kubeconfig: false
+      register: result
+      ignore_errors: true # expected to fail
+
+    - name: Verify Kubernetes cluster information is not found
+      ansible.builtin.assert:
+        that:
+          - not result.changed
+          - not result.data.name is defined
diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/aliases
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/aliases
diff --git
a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/defaults/main.yml new file mode 100644 index 00000000..a2386cb7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/defaults/main.yml @@ -0,0 +1,8 @@ +do_region: nyc1 +droplet_name: gh-ci-droplet +droplet_image: ubuntu-18-04-x64 +droplet_size: s-1vcpu-1gb +lb_name: gh-ci-loadbalancer +lb_size: lb-small +project_name: gh-ci-project +lb_tag: test-tag diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/tasks/main.yml new file mode 100644 index 00000000..d04d18b6 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer/tasks/main.yml @@ -0,0 +1,202 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure Droplet is absent (leftover) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # In case one was left from previous run + + - name: Ensure Load Balancer is absent (leftover) + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ lb_name }}" + region: "{{ do_region }}" + ignore_errors: true # In case one was left from previous run + + - name: Create the Droplet (active) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: droplet + + - name: Verify Droplet is present (from active) + ansible.builtin.assert: + that: + - droplet.changed + - droplet.data is defined + - droplet.data.droplet is defined + - droplet.data.droplet.name is defined + - droplet.data.droplet.name == droplet_name + - droplet.data.droplet.status in ["new", "active", "available"] + + - name: Create the Load Balancer (present) + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ lb_name }}" + droplet_ids: + - "{{ droplet.data.droplet.id }}" + region: "{{ do_region }}" + register: lb + + - name: Verify Load Balancer is present (from present) + ansible.builtin.assert: + that: + - lb.changed + - lb.data is defined + - lb.data.load_balancer is defined + - lb.data.load_balancer.name is defined + - lb.data.load_balancer.name == lb_name + - lb.data.load_balancer.status in ["new", "active", "available"] + + - name: Delete the Load Balancer + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ lb_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify Load Balancer is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Create the Load Balancer (and assign to Project) + 
community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ lb_name }}" + droplet_ids: + - "{{ droplet.data.droplet.id }}" + region: "{{ do_region }}" + project: "{{ project_name }}" + register: lb + + - name: Verify Load Balancer is present (from present) + ansible.builtin.assert: + that: + - lb.changed + - lb.data is defined + - lb.data.load_balancer is defined + - lb.data.load_balancer.name is defined + - lb.data.load_balancer.name == lb_name + - lb.data.load_balancer.status in ["new", "active", "available"] + - lb.assign_status is defined + - lb.assign_status == "assigned" + - lb.msg is defined + - "'Assigned do:loadbalancer' in lb.msg" + - lb.resources is defined + - lb.resources.status is defined + - lb.resources.status == "assigned" + + - name: Delete the Load Balancer + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ lb_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify Load Balancer is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Create the Load Balancer (invalid tag + droplet_ids) + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ lb_name }}" + droplet_ids: + - "{{ droplet.data.droplet.id }}" + tag: "{{ lb_tag }}" + region: "{{ do_region }}" + ignore_errors: true # Expected to fail + register: result + + - name: Verify invalid tag + droplet_ids fails + ansible.builtin.assert: + that: + - result.msg is search("mutually exclusive: tag|droplet_ids") + + - name: Create the Load Balancer (missing tag + droplet_ids) + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ lb_name }}" + region: "{{ do_region }}" + ignore_errors: true # Expected to fail + register: result + + - name: Verify missing tag + droplet_ids fails + ansible.builtin.assert: + that: + - result.msg is search("missing: tag, droplet_ids") + + - name: Create the Load Balancer (using tag) + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: present + name: "{{ lb_name }}" + tag: "{{ lb_tag }}" + region: "{{ do_region }}" + register: lb + + - name: Verify Load Balancer is present (from present) + ansible.builtin.assert: + that: + - lb.data is defined + - lb.data.load_balancer is defined + - lb.data.load_balancer.tag is defined + - lb.data.load_balancer.tag == lb_tag + - lb.data.load_balancer.status in ["new", "active", "available"] + + always: + + - name: Delete the Droplet + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + register: result + + - name: Verify Droplet is deleted + ansible.builtin.assert: + that: + - result.changed + + - name: Delete the Load Balancer + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ lb_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify Load Balancer is deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/tasks/main.yml new file mode 100644 index 00000000..df420820 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_load_balancer_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all load balancers + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify load balancer information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/defaults/main.yml new file mode 100644 index 00000000..c25f2f80 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/defaults/main.yml @@ -0,0 +1,5 @@ +do_region: nyc1 +droplet_name: gh-ci-droplet +droplet_image: ubuntu-18-04-x64 +droplet_size: s-1vcpu-1gb +alert_email: mamercad@gmail.com diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/tasks/main.yml new file mode 100644 index 00000000..d2a4d55a --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_monitoring_alerts/tasks/main.yml @@ -0,0 +1,135 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure Droplet is absent (leftover) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # In case one was left from previous run + + 
- name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create the Droplet (active) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: droplet + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Verify Droplet is present (from active) + ansible.builtin.assert: + that: + - droplet.changed + - droplet.data is defined + - droplet.data.droplet is defined + - droplet.data.droplet.name is defined + - droplet.data.droplet.name == droplet_name + - droplet.data.droplet.status in ["new", "active", "available"] + + - name: Create Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts: + state: present + oauth_token: "{{ do_api_key }}" + alerts: + email: ["{{ alert_email }}"] + slack: [] + compare: GreaterThan + description: Droplet load1 alert + enabled: true + entities: ["{{ droplet.data.droplet.id }}"] + tags: ["my_alert_tag"] + type: v1/insights/droplet/load_1 + value: 3.14159 + window: 5m + register: monitoring_alert_policy + + - name: Verify Monitoring alerts policy is present + ansible.builtin.assert: + that: + - monitoring_alert_policy.changed + - monitoring_alert_policy.data is defined + + - name: Create Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts: + state: present + oauth_token: "{{ do_api_key }}" + alerts: + email: ["{{ alert_email }}"] + slack: [] + compare: GreaterThan + description: Droplet load1 alert + enabled: true + entities: ["{{ droplet.data.droplet.id }}"] + tags: ["my_alert_tag"] + type: v1/insights/droplet/load_1 + value: 3.14159 + window: 5m + register: monitoring_alert_policy + + - name: Verify Monitoring alerts policy is not changed (idempotency) + ansible.builtin.assert: + that: + - not monitoring_alert_policy.changed + - monitoring_alert_policy.data is defined + + - name: Get Droplet Monitoring alerts polices + community.digitalocean.digital_ocean_monitoring_alerts_info: + oauth_token: "{{ do_api_key }}" + register: monitoring_alerts + + - name: Verify Monitoring alerts has data and is not changed + ansible.builtin.assert: + that: + - not monitoring_alerts.changed + - monitoring_alerts.data is defined + + - name: Delete Droplet Monitoring alerts policy + community.digitalocean.digital_ocean_monitoring_alerts: + state: absent + oauth_token: "{{ do_api_key }}" + uuid: "{{ monitoring_alert_policy.data.uuid }}" + register: result + + - name: Verify Monitoring alerts policy is deleted + ansible.builtin.assert: + that: + - result.changed + + always: + + - name: Delete the Droplet + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + register: result + + - name: Verify Droplet is deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/aliases diff --git 
a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/tasks/main.yml new file mode 100644 index 00000000..c7b2e35c --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project/tasks/main.yml @@ -0,0 +1,183 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Ensure Project is absent (leftover) + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: absent + name: "my-test-project" + register: result + with_items: + - "my-test-project" + - "my-updated-test-project" + + - name: Verify Project is absent + ansible.builtin.assert: + that: + - not result.changed + + - name: Create the Project (present) + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-test-project" + purpose: "IoT" + description: "This is a test project" + environment: "Development" + register: result + + - name: Verify Project is present (from present) + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.project is defined + - result.data.project.name is defined + - result.data.project.name == "my-test-project" + + - name: Create the Project (existing, no changes) + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-test-project" + purpose: "IoT" + description: "This is a test project" + environment: "Development" + register: result + + - name: Verify Project is present (from existing) + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.project is defined + - result.data.project.name is defined + - result.data.project.name == "my-test-project" + + - name: Update the project + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-test-project" + purpose: "IoT" + description: "This is a test project" + environment: "Production" + register: result + + - name: Verify Project is updated + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.project is defined + - result.data.project.name is defined + - result.data.project.name == "my-test-project" + - result.data.project.environment == "Production" + + - name: Update the project name + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-updated-test-project" + id: "{{ result.data.project.id }}" + purpose: "IoT" + description: "This is a test project" + environment: "Production" + register: uresult + + - name: Verify Project name is updated + ansible.builtin.assert: + that: + - uresult.changed + - uresult.data is defined + - uresult.data.project is defined + - uresult.data.project.name is defined + - 
uresult.data.project.name == "my-updated-test-project" + - uresult.data.project.id == result.data.project.id + + - name: Update the project name with non-standard purpose + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-updated-test-project" + purpose: "test" + description: "This is a test project" + environment: "Production" + register: result + + - name: Verify Project is updated with non-standard purpose + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.project is defined + - result.data.project.name is defined + - result.data.project.name == "my-updated-test-project" + - result.data.project.purpose == 'Other: test' + + - name: Update the project name with non-standard purpose (no change) + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: present + name: "my-updated-test-project" + purpose: "test" + description: "This is a test project" + environment: "Production" + register: result + + - name: Verify Project is updated with non-standard purpose (no change) + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data.project is defined + - result.data.project.name is defined + - result.data.project.name == "my-updated-test-project" + - result.data.project.purpose == 'Other: test' + + - name: Delete non-existent project + community.digitalocean.digital_ocean_project: + name: my-non-existant-project + state: absent + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify project deletion (non-existing project) + ansible.builtin.assert: + that: + - not result.changed + + - name: Delete test project + community.digitalocean.digital_ocean_project: + name: "my-updated-test-project" + oauth_token: "{{ do_api_key }}" + state: absent + register: result + + - name: Verify project deletion + ansible.builtin.assert: + that: + - result.changed + + always: + + - name: Delete the Project + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: absent + name: "my-test-project" + ignore_errors: true # Should this fail, we'll clean it up next run + + - name: Delete the updated project + community.digitalocean.digital_ocean_project: + oauth_token: "{{ do_api_key }}" + state: absent + name: "my-updated-test-project" + ignore_errors: true # Should this fail, we'll clean it up next run diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/tasks/main.yml new file mode 100644 index 
00000000..189405c7 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_project_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather project information + community.digitalocean.digital_ocean_project_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify project info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/tasks/main.yml new file mode 100644 index 00000000..b3129130 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_region_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all regions + community.digitalocean.digital_ocean_region_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify region info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/tasks/main.yml new file mode 100644 index 00000000..504779b8 --- /dev/null +++ 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_size_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about all sizes + community.digitalocean.digital_ocean_size_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify size info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/defaults/main.yml new file mode 100644 index 00000000..d1396ae4 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/defaults/main.yml @@ -0,0 +1,9 @@ +do_region: nyc1 +droplet_name: gh-ci-droplet +droplet_image: ubuntu-18-04-x64 +droplet_size: s-1vcpu-1gb +snapshot_name: gh-ci-snapshot +volume_name: gh-ci-volume +volume_size: 15 +volume_down_size: 10 +volume_up_size: 20 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/tasks/main.yml new file mode 100644 index 00000000..6a5e3816 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot/tasks/main.yml @@ -0,0 +1,216 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + # + # Droplet snapshot + # + + - name: Ensure Droplet is absent (leftover) + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # In case one was left from previous run + + - name: Create the Droplet + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: active + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + wait_timeout: 500 + register: result + + - name: Verify Droplet is present + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.droplet is defined + - result.data.droplet.name is defined + - result.data.droplet.name == droplet_name + - result.data.droplet.status in ["new", "active", "available"] + + - name: Set a fact for the Droplet id + ansible.builtin.set_fact: + droplet_id: "{{ result.data.droplet.id }}" + + - name: Snapshot the Droplet + community.digitalocean.digital_ocean_snapshot: + state: present + snapshot_type: droplet + snapshot_name: "{{ 
snapshot_name }}" + droplet_id: "{{ droplet_id }}" + oauth_token: "{{ do_api_key }}" + wait_timeout: 500 + register: result + + - name: Verify snapshot is present + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.msg is search("Created snapshot") + + - name: Gather information about all snapshots + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + register: snapshot_info + + - name: Set a fact for the snapshot id + ansible.builtin.set_fact: + snapshot_id: "{{ item.id }}" + loop: "{{ snapshot_info.data | community.general.json_query(name) }}" + vars: + name: "[?name=='{{ snapshot_name }}']" + + - name: Verify snapshot id is defined + ansible.builtin.assert: + that: + - snapshot_id is defined + + - name: Delete the snapshot + community.digitalocean.digital_ocean_snapshot: + state: absent + snapshot_id: "{{ snapshot_id }}" + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify snapshot is absent + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.msg is search("Deleted snapshot") + + # + # Volume snapshot + # + + - name: Ensure volume is absent (leftover) + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_size }}" + ignore_errors: true # In case one was left from previous run + + - name: Create a volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: present + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + block_size: "{{ volume_size }}" + register: result + + - name: Verify volume is present + ansible.builtin.assert: + that: + - result.changed + - not result.failed + + - name: Set a fact for the volume id + ansible.builtin.set_fact: + volume_id: "{{ result.id }}" + + - name: Snapshot the volume + community.digitalocean.digital_ocean_snapshot: + state: present + snapshot_type: volume + snapshot_name: "{{ snapshot_name }}" + volume_id: "{{ volume_id }}" + oauth_token: "{{ do_api_key }}" + wait_timeout: 500 + register: result + + - name: Verify snapshot is present + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.msg is search("Created snapshot") + + - name: Get information about all snapshots + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + register: snapshot_info + + - name: Set a fact for the snapshot id + ansible.builtin.set_fact: + snapshot_id: "{{ item.id }}" + loop: "{{ snapshot_info.data | community.general.json_query(name) }}" + vars: + name: "[?name=='{{ snapshot_name }}']" + + - name: Verify snapshot id is defined + ansible.builtin.assert: + that: + - snapshot_id is defined + + - name: Delete the snapshot + community.digitalocean.digital_ocean_snapshot: + state: absent + snapshot_id: "{{ snapshot_id }}" + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify snapshot is absent + ansible.builtin.assert: + that: + - result.changed + - not result.failed + - result.msg is search("Deleted snapshot") + + - name: Remove the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify the volume is deleted + ansible.builtin.assert: + that: + - result.changed + + always: + + 
- name: Delete the Droplet + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ do_api_key }}" + state: absent + name: "{{ droplet_name }}" + unique_name: true + region: "{{ do_region }}" + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + ignore_errors: true # Should this fail, we'll clean it up next run + + - name: Remove the volume + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ do_api_key }}" + command: create + state: absent + volume_name: "{{ volume_name }}" + region: "{{ do_region }}" + ignore_errors: true # Should this fail, we'll clean it up next run diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/defaults/main.yml new file mode 100644 index 00000000..3289b386 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/defaults/main.yml @@ -0,0 +1 @@ +snapshot_type: all diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/tasks/main.yml new file mode 100644 index 00000000..e39f95a5 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_snapshot_info/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about snapshots (all) + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + snapshot_type: "all" + register: result + + - name: Verify snapshot info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + + - name: Gather information about snapshots (droplets) + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + snapshot_type: "droplet" + register: result + + - name: Verify snapshot info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + + - name: Gather information about snapshots (volumes) + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + snapshot_type: "volume" + register: result + + - name: Verify snapshot info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + + - name: Gather information about snapshots (by_id) + community.digitalocean.digital_ocean_snapshot_info: + oauth_token: "{{ do_api_key }}" + snapshot_type: "by_id" + snapshot_id: "12345678" + ignore_errors: true + register: result + + - name: Verify that a non-existent snapshot failed + ansible.builtin.assert: + that: + - result.failed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/aliases 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/defaults/main.yml new file mode 100644 index 00000000..493a28a8 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/defaults/main.yml @@ -0,0 +1,2 @@ +space_name: gh-ci-space-1 +space_region: nyc3 diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/tasks/main.yml new file mode 100644 index 00000000..84f947b6 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces/tasks/main.yml @@ -0,0 +1,114 @@ +--- +- block: + + - name: Ensure Spaces API keys are provided + ansible.builtin.fail: + msg: aws_access_key_id and aws_secret_access_key should be defined in tests/integration/integration_config.yml + when: + - aws_access_key_id is not defined + - aws_access_key_id | length == 0 + - aws_secret_access_key is not defined + - aws_secret_access_key | length == 0 + + - name: Create Space {{ space_name }} in {{ space_region }} + community.digitalocean.digital_ocean_spaces: + state: present + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: "{{ space_region }}" + register: result + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - result.changed + - result.data.space is defined + - result.data.space.name == "{{ space_name }}" + - result.data.space.region == "{{ space_region }}" + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Create Space {{ space_name }} in {{ space_region }} (again, test idempotency) + community.digitalocean.digital_ocean_spaces: + state: present + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: "{{ space_region }}" + register: result + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + + - name: Delete Space {{ space_name }} in {{ space_region }} + community.digitalocean.digital_ocean_spaces: + state: absent + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: "{{ space_region }}" + register: result + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - result.changed + - result.msg is search("Deleted Space") + + - name: Give the cloud a minute to settle + ansible.builtin.pause: + minutes: 1 + + - name: Delete Space {{ space_name }} in {{ space_region }} (again, test idempotency) + community.digitalocean.digital_ocean_spaces: + state: absent + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: "{{ space_region }}" + register: result + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + - result.msg is search("No Space") + + - name: Create Space {{ space_name }} 
in xyz1 (test bad region) + community.digitalocean.digital_ocean_spaces: + state: present + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: xyz1 + register: result + ignore_errors: true # Expected to fail + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + - result.exception is defined + - result.failed + + - name: Delete Space {{ space_name }} in xyz1 (test bad region) + community.digitalocean.digital_ocean_spaces: + state: absent + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + name: "{{ space_name }}" + region: xyz1 + register: result + ignore_errors: true # Expected to fail + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + - result.exception is defined + - result.failed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/tasks/main.yml new file mode 100644 index 00000000..0ed2491f --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_spaces_info/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- block: + + - name: Ensure Spaces API keys are provided + ansible.builtin.fail: + msg: aws_access_key_id and aws_secret_access_key should be defined in tests/integration/integration_config.yml + when: + - aws_access_key_id is not defined + - aws_access_key_id | length == 0 + - aws_secret_access_key is not defined + - aws_secret_access_key | length == 0 + + - name: Get Spaces in nyc3 + community.digitalocean.digital_ocean_spaces_info: + state: present + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + region: nyc3 + register: result + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + - result.data.spaces is defined + + - name: Get Spaces in xyz1 (test bad region) + community.digitalocean.digital_ocean_spaces_info: + state: present + aws_access_key_id: "{{ aws_access_key_id }}" + aws_secret_access_key: "{{ aws_secret_access_key }}" + region: xyz1 + register: result + ignore_errors: true # Expected to fail + + - name: Verify Spaces info + ansible.builtin.assert: + that: + - not result.changed + - result.exception is defined + - result.failed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/aliases new file mode 100644 index 00000000..e69de29b 
--- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/defaults/main.yml new file mode 100644 index 00000000..f98a0a54 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/defaults/main.yml @@ -0,0 +1,4 @@ +key_name: gh-ci-sshkey +dummy_ssh_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDNnKfzimdn4+Tz2hlWRtZVl9N+QiN2XAkDMo/RTRD7I2NyIrzlMn+7Fg3lLjbkIDU44npzkFnneKrcnf/utniST7Isvbnvdz4B/hJcsbAaS8A9ZE0vgv8IvRETlkOszXuYl0mi12NlqHr8UhcKIzwvUpae421lzNpjKb72mIaZehefPT1E+p9qgMYTOvO+xa1Q/U1JxEYeygY228J9APkV5Elu/QWqgsoMVuSbiKhxIt8vvFo9x7pgKyc9hRbraMMcvdCDq5vl3PRjuW4nijft7Em2nt2vb4duUWgOhtCHIzNRDEJ1fX14lvT01UULr/vjC+r4/j+F288V8CWH3DCX33wGBaFVj7SF4EoK9XhccGslGoq5+8op9jPuLySRw19g41qcsmCB4v895fEoMjoIJvJVSOyjnszJyhIW/E5qNVKNnOR/IAu3OfR3HCDRGSQ7qhIu3xd1Cjai2V8xo17m4WOjnZ3mbIEJHsicOF+dWumVVm0aGF552Xl7jSyFGv4f6ML0SruKrJctvzILRfTsRUH2NFTDqvE1WmQ9pAfXmW9F5VFQ71yhKUIN7ItwR5yBYc1h7R7X5uYQ5JtFeG+nOvnedTUXvc6Pzx3WUWhuAEjFvXg66JoG2Vs4ReIE9lcPwCqh/aVMlhc4X2/O1GavnslhOt08nHmhNEUfP+fCyw== Dummy CI SSH Key +invalid_key_name: gi-ci-invalid-sshkey +invalid_ssh_pub_key: invalid diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/tasks/main.yml new file mode 100644 index 00000000..a5ab4d24 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create an invalid SSH key + community.digitalocean.digital_ocean_sshkey: + name: "{{ invalid_key_name }}" + ssh_pub_key: "{{ invalid_ssh_pub_key }}" + oauth_token: "{{ do_api_key }}" + ignore_errors: true + register: result + + - name: Verify that the invalid SSH key failed + ansible.builtin.assert: + that: + - result.failed + + - name: Create a valid SSH key + community.digitalocean.digital_ocean_sshkey: + name: "{{ key_name }}" + ssh_pub_key: "{{ dummy_ssh_pub_key }}" + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify that SSH key was created + ansible.builtin.assert: + that: + - result.changed + + - name: Fetch all SSH keys + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ do_api_key }}" + register: ssh_keys + + - name: Find our dummy ci SSH key + set_fact: + ci_ssh_key: "{{ ssh_keys.data | selectattr('name', 'equalto', key_name) }}" + register: result_find + + - name: Verify that ci SSH key was found + ansible.builtin.assert: + that: + - not result_find.failed + - not result_find.changed + - result_find.ansible_facts is defined + - result_find.ansible_facts.ci_ssh_key is defined + - result_find.ansible_facts.ci_ssh_key.0 is defined + - result_find.ansible_facts.ci_ssh_key.0.name == key_name + + always: + + - name: Delete the SSH key + community.digitalocean.digital_ocean_sshkey: + state: "absent" + fingerprint: "{{ result.data.ssh_key.fingerprint }}" + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify that the SSH key 
was deleted + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/tasks/main.yml new file mode 100644 index 00000000..1b9f9f66 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_sshkey_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather SSH Key information + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify SSH key info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/defaults/main.yml new file mode 100644 index 00000000..b57bb681 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/defaults/main.yml @@ -0,0 +1 @@ +tag_name: integration-test diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/tasks/main.yml new file mode 100644 index 00000000..c058b7a9 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create a new tag + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ do_api_key }}" + name: "{{ tag_name }}" + state: present + register: create_tag + + - name: Create a new tag for idempotency + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ do_api_key }}" + name: "{{ 
tag_name }}" + state: present + register: create_tag_idempotent + + - name: Verify tag created and idempotent + ansible.builtin.assert: + that: + - create_tag.changed == True + - create_tag_idempotent.changed == False + - create_tag.data.tag.name == tag_name + + - name: Gather information about tag with given name + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ do_api_key }}" + tag_name: "{{ tag_name }}" + register: result_find + + - name: Verify that ci tag was found + ansible.builtin.assert: + that: + - not result_find.failed + - not result_find.changed + - result_find.data is defined + - result_find.data | length == 1 + - result_find.data.0.name is defined + - result_find.data.0.name == tag_name + + always: + + - name: Delete tag + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ do_api_key }}" + name: integration-test + state: absent + register: delete_tag + + - name: Delete tag with idempotency + community.digitalocean.digital_ocean_tag: + oauth_token: '{{do_api_key}}' + name: integration-test-2 + state: absent + register: delete_tag_idempotent + + - name: Verify that tag was deleted + ansible.builtin.assert: + that: + - delete_tag.changed == True + - delete_tag_idempotent.changed == False diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/tasks/main.yml new file mode 100644 index 00000000..c2844033 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_tag_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather tag information + community.digitalocean.digital_ocean_tag_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify tag information fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/defaults/main.yml 
b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/tasks/main.yml new file mode 100644 index 00000000..a5b6262f --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_volume_info/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Gather information about volumes + community.digitalocean.digital_ocean_volume_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify volume info fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/defaults/main.yml new file mode 100644 index 00000000..b296c04c --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/defaults/main.yml @@ -0,0 +1,2 @@ +do_region: nyc1 +vpc_name: gh-ci-vpc diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/tasks/main.yml new file mode 100644 index 00000000..eed068a6 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Create a VPC + community.digitalocean.digital_ocean_vpc: + state: present + oauth_token: "{{ do_api_key }}" + name: "{{ vpc_name }}" + region: "{{ do_region }}" + register: result + + - name: Verify VPC created + ansible.builtin.assert: + that: + - result.changed + - result.data is defined + - result.data.vpc is defined + - result.data.vpc != {} + - result.data.vpc.name == vpc_name + + - name: Set a fact for the VPC ID + ansible.builtin.set_fact: + vpc_id: "{{ result.data.vpc.id }}" + + - name: Fetch VPC members + community.digitalocean.digital_ocean_vpc_info: + oauth_token: "{{ do_api_key }}" + name: "{{ vpc_name }}" + members: true + register: result + + - name: Verify VPC members fetched + ansible.builtin.assert: + that: + - not result.changed + - result.data is defined + - result.data != {} + - result.data.links is defined + - result.data.members is defined + - 
result.data.meta is defined + + - name: Delete a VPC + community.digitalocean.digital_ocean_vpc: + state: absent + oauth_token: "{{ do_api_key }}" + name: "{{ vpc_name }}" + register: result + + - name: Verify VPC deleted + ansible.builtin.assert: + that: + - result.changed + + always: + + - name: Delete a VPC + community.digitalocean.digital_ocean_vpc: + state: absent + oauth_token: "{{ do_api_key }}" + name: "{{ vpc_name }}" + register: result + ignore_errors: true diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/aliases b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/aliases new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/aliases diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/defaults/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/defaults/main.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/defaults/main.yml diff --git a/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/tasks/main.yml b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/tasks/main.yml new file mode 100644 index 00000000..78a990f3 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/integration/targets/digital_ocean_vpc_info/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- block: + + - name: Ensure API key is provided + ansible.builtin.fail: + msg: do_api_key should be defined in tests/integration/integration_config.yml + when: + - do_api_key is not defined + - do_api_key | length == 0 + + - name: Fetch all VPCs + community.digitalocean.digital_ocean_vpc_info: + oauth_token: "{{ do_api_key }}" + register: result + + - name: Verify VPCs fetched + ansible.builtin.assert: + that: + - not result.changed diff --git a/ansible_collections/community/digitalocean/tests/sanity/ignore-2.12.txt b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..6fa08266 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.12.txt @@ -0,0 +1,12 @@ +plugins/modules/digital_ocean_spaces.py compile-2.6!skip +plugins/modules/digital_ocean_spaces_info.py compile-2.6!skip +plugins/modules/digital_ocean_spaces.py compile-2.7!skip +plugins/modules/digital_ocean_spaces_info.py compile-2.7!skip +plugins/modules/digital_ocean_spaces.py compile-3.5!skip +plugins/modules/digital_ocean_spaces_info.py compile-3.5!skip +plugins/modules/digital_ocean_spaces.py import-2.6!skip +plugins/modules/digital_ocean_spaces_info.py import-2.6!skip +plugins/modules/digital_ocean_spaces.py import-2.7!skip +plugins/modules/digital_ocean_spaces_info.py import-2.7!skip +plugins/modules/digital_ocean_spaces.py import-3.5!skip +plugins/modules/digital_ocean_spaces_info.py import-3.5!skip diff --git a/ansible_collections/community/digitalocean/tests/sanity/ignore-2.13.txt b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..56f00467 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.13.txt @@ -0,0 +1,8 @@ +plugins/modules/digital_ocean_spaces.py compile-2.7!skip 
+plugins/modules/digital_ocean_spaces_info.py compile-2.7!skip +plugins/modules/digital_ocean_spaces.py compile-3.5!skip +plugins/modules/digital_ocean_spaces_info.py compile-3.5!skip +plugins/modules/digital_ocean_spaces.py import-2.7!skip +plugins/modules/digital_ocean_spaces_info.py import-2.7!skip +plugins/modules/digital_ocean_spaces.py import-3.5!skip +plugins/modules/digital_ocean_spaces_info.py import-3.5!skip diff --git a/ansible_collections/community/digitalocean/tests/sanity/ignore-2.9.txt b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..f82d2f40 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/sanity/ignore-2.9.txt @@ -0,0 +1,3 @@ +plugins/modules/digital_ocean.py validate-modules:deprecation-mismatch +plugins/modules/digital_ocean.py validate-modules:invalid-documentation +plugins/modules/digital_ocean.py validate-modules:missing-main-call diff --git a/ansible_collections/community/digitalocean/tests/unit/plugins/inventory/test_digitalocean.py b/ansible_collections/community/digitalocean/tests/unit/plugins/inventory/test_digitalocean.py new file mode 100644 index 00000000..340f3610 --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/unit/plugins/inventory/test_digitalocean.py @@ -0,0 +1,306 @@ +# Copyright (c) 2021 Ansible Project +# GNGeneral Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import pytest + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.inventory.data import InventoryData +from ansible.template import Templar +from ansible.parsing.dataloader import DataLoader +import ansible_collections.community.digitalocean.plugins.inventory.digitalocean as module_under_test +from ansible_collections.community.digitalocean.plugins.inventory.digitalocean import ( + InventoryModule, +) + + +@pytest.fixture() +def inventory(): + r = InventoryModule() + r.inventory = InventoryData() + r.templar = Templar(loader=DataLoader()) + return r + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file("digitalocean_foobar.yml") is False + + +@pytest.fixture() +def payload(): + return [ + { + "id": 3164444, + "name": "foo", + "memory": 1024, + "vcpus": 1, + "disk": 25, + "locked": False, + "status": "active", + "kernel": { + "id": 2233, + "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", + "version": "3.13.0-37-generic", + }, + "image": { + "id": 6918990, + "name": "14.04 x64", + "distribution": "Ubuntu", + "slug": "ubuntu-16-04-x64", + }, + "size_slug": "s-1vcpu-1gb", + "networks": { + "v4": [ + { + "ip_address": "104.236.32.182", + "netmask": "255.255.192.0", + "gateway": "104.236.0.1", + "type": "public", + } + ], + "v6": [ + { + "ip_address": "2604:A880:0800:0010:0000:0000:02DD:4001", + "netmask": 64, + "gateway": "2604:A880:0800:0010:0000:0000:0000:0001", + "type": "public", + } + ], + }, + "region": { + "name": "New York 3", + "slug": "nyc3", + }, + "tags": [], + "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662", + }, + { + "id": 3164445, + "name": "bar", + "memory": 1024, + "vcpus": 1, + "disk": 25, + "locked": False, + "status": "active", + "kernel": { + "id": 2233, + "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", + "version": "3.13.0-37-generic", + }, + "image": { + "id": 6918990, + "name": "14.04 x64", + "distribution": "Ubuntu", + "slug": "ubuntu-16-04-x64", + }, + "size_slug": "s-1vcpu-1gb", + 
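            # Second fixture droplet ("bar") is placed in fra1, while "foo" above is in nyc3,
            # so the region-based filter tests further down can tell the two hosts apart.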
"networks": { + "v4": [ + { + "ip_address": "104.236.32.185", + "netmask": "255.255.192.0", + "gateway": "104.236.0.1", + "type": "public", + } + ], + "v6": [ + { + "ip_address": "2604:A880:0800:0010:0000:0000:02DD:4004", + "netmask": 64, + "gateway": "2604:A880:0800:0010:0000:0000:0000:0001", + "type": "public", + } + ], + }, + "region": { + "name": "Frankfurt 1", + "slug": "fra1", + }, + "tags": [], + "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662", + }, + ] + + +def get_option(option): + options = { + "attributes": ["id", "size_slug"], + "var_prefix": "do_", + "strict": False, + } + return options.get(option) + + +def test_populate_hostvars(inventory, payload, mocker): + inventory.get_option = mocker.MagicMock(side_effect=get_option) + + inventory._populate(payload) + + host_foo = inventory.inventory.get_host("foo") + host_bar = inventory.inventory.get_host("bar") + + assert host_foo.vars["do_id"] == 3164444 + assert host_bar.vars["do_size_slug"] == "s-1vcpu-1gb" + + # if a prefix is set, unprefixed attributes should not appear in host vars + assert "id" not in host_foo.vars + assert "size_slug" not in host_bar.vars + + +@pytest.mark.parametrize("transform", ["never", "ignore"]) +def test_populate_groups_no_sanitization(inventory, mocker, transform): + def get_option(opt): + return dict( + attributes=["id", "tags"], + var_prefix="do_", + keyed_groups=[dict(key="do_tags", prefix="", separator="")], + ).get(opt) + + inventory.get_option = mocker.MagicMock(side_effect=get_option) + mocker.patch("ansible.constants.TRANSFORM_INVALID_GROUP_CHARS", transform) + + inventory._populate( + [ + dict( + id=3164444, + name="test", + tags=["lower", "UPPER", "un_der", "col:on", "da-sh", "with_123"], + ), + ] + ) + + assert set( + ("all", "ungrouped", "lower", "UPPER", "un_der", "col:on", "da-sh", "with_123") + ) == set((inventory.inventory.groups.keys())) + + +@pytest.mark.parametrize("transform", ["always", "silently"]) +def test_populate_groups_sanitization(inventory, mocker, transform): + def get_option(opt): + return dict( + attributes=["id", "tags"], + var_prefix="x_", + keyed_groups=[dict(key="x_tags", prefix="", separator="")], + ).get(opt) + + inventory.get_option = mocker.MagicMock(side_effect=get_option) + mocker.patch("ansible.constants.TRANSFORM_INVALID_GROUP_CHARS", transform) + + inventory._populate( + [ + dict( + id=3164444, + name="test", + tags=["lower", "UPPER", "un_der", "col:on", "da-sh", "with_123"], + ), + ] + ) + + assert set( + ("all", "ungrouped", "lower", "UPPER", "un_der", "col_on", "da_sh", "with_123") + ) == set((inventory.inventory.groups.keys())) + + +def get_option_with_templated_api_token(option): + options = { + # "random_choice" with just a single input always returns the same result. 
+ "api_token": '{{ lookup("random_choice", "my-do-token") }}', + "pagination": 100, + } + return options.get(option) + + +def test_get_payload_with_templated_api_token(inventory, mocker): + inventory.get_option = mocker.MagicMock( + side_effect=get_option_with_templated_api_token + ) + + mocker.patch(module_under_test.__name__ + ".Request") + RequestMock = module_under_test.Request + + req_instance = RequestMock.return_value + req_instance.get.return_value.read.return_value = '{"droplets": []}' + + inventory._get_payload() + + init_headers = RequestMock.call_args.kwargs["headers"] + assert init_headers["Authorization"] == "Bearer my-do-token" + + +def get_option_with_filters(option): + options = { + "attributes": ["id", "size_slug", "region"], + "var_prefix": "do_", + "strict": False, + "filters": [ + 'do_region.slug == "fra1"', + ], + } + return options.get(option) + + +def test_populate_hostvars_with_filters(inventory, payload, mocker): + inventory.get_option = mocker.MagicMock(side_effect=get_option_with_filters) + inventory._populate(payload) + + host_foo = inventory.inventory.get_host("foo") + host_bar = inventory.inventory.get_host("bar") + + assert host_foo is None + assert host_bar.vars["do_size_slug"] == "s-1vcpu-1gb" + + +def get_variables(): + return { + "do_region": { + "slug": "fra1", + }, + "do_tags": ["something"], + } + + +def test_passes_filters_accept_empty(inventory, mocker): + filters = [] + variables = get_variables() + assert inventory._passes_filters(filters, variables, "foo") + + +def test_passes_filters_accept(inventory, mocker): + filters = ['do_region.slug == "fra1"'] + variables = get_variables() + assert inventory._passes_filters(filters, variables, "foo") + + +def test_passes_filters_reject(inventory, mocker): + filters = ['do_region.slug == "nyc3"'] + variables = get_variables() + assert not inventory._passes_filters(filters, variables, "foo") + + +def test_passes_filters_reject_any(inventory, mocker): + filters = [ + 'do_region.slug == "fra1"', # accept + '"nope" in do_tags', # reject + ] + variables = get_variables() + assert not inventory._passes_filters(filters, variables, "foo") + + +def test_passes_filters_invalid_filters(inventory, mocker): + filters = ["not a valid filter"] + variables = get_variables() + assert not inventory._passes_filters(filters, variables, "foo") + + +def test_passes_filters_invalid_filters_strict(inventory, mocker): + filters = ["not a valid filter"] + variables = get_variables() + try: + inventory._passes_filters(filters, variables, "foo", True) + assert False, "expected _passes_filters() to raise AnsibleError" + except AnsibleError as e: + pass diff --git a/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes.py b/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes.py new file mode 100644 index 00000000..cfb0f59b --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes.py @@ -0,0 +1,263 @@ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import MagicMock +from ansible_collections.community.digitalocean.plugins.modules.digital_ocean_kubernetes import ( + DOKubernetes, +) + + +class TestDOKubernetes(unittest.TestCase): + def test_get_by_id_when_ok(self): + module = MagicMock() + k = 
DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertEqual(k.get_by_id(), {"foo": "bar"}) + + def test_get_by_id_when_not_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertIsNone(k.get_by_id()) + + def test_get_all_clusters_when_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertEqual(k.get_all_clusters(), {"foo": "bar"}) + + def test_get_all_clusters_when_not_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertIsNone(k.get_all_clusters()) + + def test_get_by_name_none(self): + module = MagicMock() + k = DOKubernetes(module) + self.assertIsNone(k.get_by_name(None)) + + def test_get_by_name_found(self): + module = MagicMock() + k = DOKubernetes(module) + k.get_all_clusters = MagicMock() + k.get_all_clusters.return_value = {"kubernetes_clusters": [{"name": "foo"}]} + self.assertEqual(k.get_by_name("foo"), {"name": "foo"}) + + def test_get_by_name_not_found(self): + module = MagicMock() + k = DOKubernetes(module) + k.get_all_clusters = MagicMock() + k.get_all_clusters.return_value = {"kubernetes_clusters": [{"name": "foo"}]} + self.assertIsNone(k.get_by_name("foo2")) + + def test_get_kubernetes_kubeconfig_when_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.body = "kubeconfig" + self.assertEqual(k.get_kubernetes_kubeconfig(), "kubeconfig") + + def test_get_kubernetes_kubeconfig_when_not_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.body = "kubeconfig" + self.assertNotEqual(k.get_kubernetes_kubeconfig(), "kubeconfig") + + def test_get_kubernetes_when_found(self): + module = MagicMock() + k = DOKubernetes(module) + k.get_by_name = MagicMock() + k.get_by_name.return_value = {"id": 42} + self.assertEqual(k.get_kubernetes(), {"id": 42}) + + def test_get_kubernetes_when_not_found(self): + module = MagicMock() + k = DOKubernetes(module) + k.get_by_name = MagicMock() + k.get_by_name.return_value = None + self.assertIsNone(k.get_kubernetes()) + + def test_get_kubernetes_options_when_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.json = {"foo": "bar"} + k.rest.get.return_value.status_code = 200 + self.assertEqual(k.get_kubernetes_options(), {"foo": "bar"}) + + def test_get_kubernetes_options_when_not_ok(self): + module = MagicMock() + k = DOKubernetes(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.json = {"foo": "bar"} + k.rest.get.return_value.status_code = 400 + self.assertIsNone(k.get_kubernetes_options()) + + def test_ensure_running_when_running(self): + module = MagicMock() + module.fail_json = MagicMock() + + k = DOKubernetes(module) + k.end_time = 20 + k.wait_timeout = 1 + k.get_by_id = MagicMock() + + 
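+        # The stubbed get_by_id reports the cluster below as already "running", so ensure_running() should return it right away.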
cluster = {"kubernetes_cluster": {"status": {"state": "running"}}} + + k.get_by_id.return_value = cluster + + time = MagicMock() + time.time = MagicMock() + time.time.return_value = 10 + time.sleep = MagicMock() + + self.assertEqual(k.ensure_running(), cluster) + + def test_ensure_running_when_not_running(self): + module = MagicMock() + module.fail_json = MagicMock() + + k = DOKubernetes(module) + k.end_time = 20 + k.wait_timeout = -100 + k.get_by_id = MagicMock() + + cluster = {"kubernetes_cluster": {"status": {"state": "stopped"}}} + + k.get_by_id.return_value = cluster + + time = MagicMock() + time.time = MagicMock() + time.time.return_value = 20 + time.sleep = MagicMock() + + # module.fail_json.assert_called() + assert True + + def test_create_ok(self): + module = MagicMock() + module.exit_json = MagicMock() + module.fail_json = MagicMock() + + k = DOKubernetes(module) + k.module = MagicMock() + k.module.params = MagicMock() + + k.module.params.return_value = {"region": "nyc1"} + + k.get_kubernetes_options = MagicMock() + + kubernetes_options = { + "options": { + "regions": [{"name": "New York 1", "slug": "nyc1"}], + "versions": [{"kubernetes_version": "1.18.8", "slug": "1.18.8-do.0"}], + "sizes": [{"name": "s-1vcpu-2gb", "slug": "s-1vcpu-2gb"}], + } + } + + k.get_kubernetes_options.return_value = kubernetes_options + + k.get_kubernetes = MagicMock() + k.get_kubernetes.return_value = {"foo": "bar"} + k.cluster_id = MagicMock() + k.cluster_id.return_value = 42 + + k.rest = MagicMock() + k.rest.post = MagicMock() + k.rest.post.return_value.json = {"kubernetes_cluster": {"id": 42}} + k.rest.post.return_value.status_code = 200 + k.ensure_running = MagicMock() + k.cluster_id = MagicMock() + k.module = MagicMock() + + k.create() + k.module.exit_json.assert_called() + + def test_create_not_ok(self): + module = MagicMock() + module.exit_json = MagicMock() + module.fail_json = MagicMock() + + k = DOKubernetes(module) + k.module = MagicMock() + k.module.params = MagicMock() + + k.module.params.return_value = {"region": "nyc1"} + + k.get_kubernetes_options = MagicMock() + + kubernetes_options = { + "options": { + "regions": [{"name": "New York 1", "slug": "nyc1"}], + "versions": [{"kubernetes_version": "1.18.8", "slug": "1.18.8-do.0"}], + "sizes": [{"name": "s-1vcpu-2gb", "slug": "s-1vcpu-2gb"}], + } + } + + k.get_kubernetes_options.return_value = kubernetes_options + + k.get_kubernetes = MagicMock() + k.get_kubernetes.return_value = {"foo": "bar"} + k.cluster_id = MagicMock() + k.cluster_id.return_value = 42 + + k.rest = MagicMock() + k.rest.post = MagicMock() + k.rest.post.return_value.json = {"kubernetes_cluster": {"id": 42}} + k.rest.post.return_value.status_code = 400 + k.ensure_running = MagicMock() + k.cluster_id = MagicMock() + k.module = MagicMock() + + k.create() + k.module.exit_json.assert_called() + + def test_delete_ok(self): + module = MagicMock() + module.exit_json = MagicMock() + + k = DOKubernetes(module) + + k.get_kubernetes = MagicMock() + + k.rest = MagicMock() + k.rest.delete = MagicMock() + k.rest.delete.return_value.id = 42 + k.rest.delete.return_value.status_code = 204 + + k.delete() + k.module.exit_json.assert_called() + + def test_delete_not_ok(self): + module = MagicMock() + module.exit_json = MagicMock() + + k = DOKubernetes(module) + + k.get_kubernetes = MagicMock() + k.get_kubernetes.return_value = None + + k.delete() + k.module.exit_json.assert_called() diff --git 
a/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes_info.py b/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes_info.py new file mode 100644 index 00000000..034c1c9c --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/unit/plugins/modules/test_digital_ocean_kubernetes_info.py @@ -0,0 +1,116 @@ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import MagicMock +from ansible_collections.community.digitalocean.plugins.modules.digital_ocean_kubernetes_info import ( + DOKubernetesInfo, +) + + +class TestDOKubernetesInfo(unittest.TestCase): + def test_get_by_id_when_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertEqual(k.get_by_id(), {"foo": "bar"}) + + def test_get_by_id_when_not_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertIsNone(k.get_by_id()) + + def test_get_all_clusters_when_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertEqual(k.get_all_clusters(), {"foo": "bar"}) + + def test_get_all_clusters_when_not_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.json = {"foo": "bar"} + self.assertIsNone(k.get_all_clusters()) + + def test_get_by_name_none(self): + module = MagicMock() + k = DOKubernetesInfo(module) + self.assertIsNone(k.get_by_name(None)) + + def test_get_by_name_found(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.get_all_clusters = MagicMock() + k.get_all_clusters.return_value = {"kubernetes_clusters": [{"name": "foo"}]} + self.assertEqual(k.get_by_name("foo"), {"name": "foo"}) + + def test_get_by_name_not_found(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.get_all_clusters = MagicMock() + k.get_all_clusters.return_value = {"kubernetes_clusters": [{"name": "foo"}]} + self.assertIsNone(k.get_by_name("foo2")) + + def test_get_kubernetes_kubeconfig_when_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 200 + k.rest.get.return_value.body = "kubeconfig" + self.assertEqual(k.get_kubernetes_kubeconfig(), "kubeconfig") + + def test_get_kubernetes_kubeconfig_when_not_ok(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.rest = MagicMock() + k.rest.get = MagicMock() + k.rest.get.return_value.status_code = 400 + k.rest.get.return_value.body = "kubeconfig" + self.assertNotEqual(k.get_kubernetes_kubeconfig(), "kubeconfig") + + def test_get_kubernetes_when_found(self): + module = MagicMock() + k = DOKubernetesInfo(module) + k.get_by_name = MagicMock() + k.get_by_name.return_value = {"id": 42} + self.assertEqual(k.get_kubernetes(), {"id": 42}) + + def test_get_kubernetes_when_not_found(self): + module = 
MagicMock() + k = DOKubernetesInfo(module) + k.get_by_name = MagicMock() + k.get_by_name.return_value = None + self.assertIsNone(k.get_kubernetes()) + + def test_get_when_found(self): + module = MagicMock() + module.exit_json = MagicMock() + k = DOKubernetesInfo(module) + k.get_kubernetes = MagicMock() + k.get_kubernetes_kubeconfig = MagicMock() + k.get() + module.exit_json.assert_called() + + def test_get_when_not_found(self): + module = MagicMock() + module.fail_json = MagicMock() + k = DOKubernetesInfo(module) + k.get_kubernetes = MagicMock() + k.get_kubernetes.return_value = None + k.get() + module.fail_json.assert_called() diff --git a/ansible_collections/community/digitalocean/tests/utils/render.sh b/ansible_collections/community/digitalocean/tests/utils/render.sh new file mode 100755 index 00000000..5ffcf1be --- /dev/null +++ b/ansible_collections/community/digitalocean/tests/utils/render.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Renders tests/integration/integration_config.yml + +set -e +set -o pipefail +set -u + +function main() +{ + readonly template="$1"; shift + readonly content="$(cat "$template")" + + eval "echo \"$content\"" +} + +main "$@" |
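
For reference, a minimal sketch of how render.sh can be invoked (the variable name EXAMPLE_TOKEN, the template path, and the template's contents are hypothetical; the script eval-expands any ${VAR} references in the template passed as its first argument and prints the result to stdout, which the caller redirects into tests/integration/integration_config.yml as the script's header comment describes). Because of set -u, every ${VAR} the template references must be set in the environment, otherwise the script exits with an unbound-variable error.

    # hypothetical template line:   do_api_key: ${EXAMPLE_TOKEN}
    EXAMPLE_TOKEN=abc123 ./tests/utils/render.sh my_config.yml.template > tests/integration/integration_config.yml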