diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:03:42 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:03:42 +0000 |
commit | 66cec45960ce1d9c794e9399de15c138acb18aed (patch) | |
tree | 59cd19d69e9d56b7989b080da7c20ef1a3fe2a5a /ansible_collections/cloud | |
parent | Initial commit. (diff) | |
download | ansible-upstream.tar.xz ansible-upstream.zip |
Adding upstream version 7.3.0+dfsg.upstream/7.3.0+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
132 files changed, 12463 insertions, 0 deletions
diff --git a/ansible_collections/cloud/common/CHANGELOG.rst b/ansible_collections/cloud/common/CHANGELOG.rst new file mode 100644 index 00000000..56cf8009 --- /dev/null +++ b/ansible_collections/cloud/common/CHANGELOG.rst @@ -0,0 +1,121 @@ +========================== +cloud.common Release Notes +========================== + +.. contents:: Topics + + +v2.1.2 +====== + +Bugfixes +-------- + +- Ensure we don't shutdown the server when we've still got some ongoing tasks (https://github.com/ansible-collections/cloud.common/pull/109). + +v2.1.1 +====== + +Minor Changes +------------- + +- Move the content of README_ansible_turbo.module.rst in the main README.md to get visibility on Ansible Galaxy. + +Bugfixes +-------- + +- fix parameters with aliases not being passed through (https://github.com/ansible-collections/cloud.common/issues/91). +- fix turbo mode loading incorrect module (https://github.com/ansible-collections/cloud.common/pull/102). +- turbo - Ensure we don't call the module with duplicated aliased parameters. + +v2.1.0 +====== + +Minor Changes +------------- + +- Cosmetic changes in the documentation for the inclusion in the Ansible collection. +- turbo - Extend the unit-test coverage. +- turbo - Use a BSD license for the module_utils and plugin_utils files. +- turbo - add support for coroutine for lookup plugins (https://github.com/ansible-collections/cloud.common/pull/75). + +v2.0.4 +====== + +Major Changes +------------- + +- turbo - enable turbo mode for lookup plugins + +Bugfixes +-------- + +- add exception handler to main async loop (https://github.com/ansible-collections/cloud.common/pull/67). +- pass current task's environment through to execution (https://github.com/ansible-collections/cloud.common/pull/69). +- turbo - AnsibleTurboModule was missing some _ansible_facts variable like _diff, _ansible_tmpdir. (https://github.com/ansible-collections/cloud.common/issues/65) +- turbo - honor the ``remote_tmp`` configuration key. 
+ +v2.0.3 +====== + +Bugfixes +-------- + +- Introduces a fix for the future Python 3.10 (#53) +- turbo - make sure socket doesn't close prematurely, preventing issues with large amounts of data passed as module parameters (https://github.com/ansible-collections/cloud.common/issues/61) + +v2.0.2 +====== + +Bugfixes +-------- + +- Introduces a fix for the future Python 3.10 (#53) +- fail_json method should honor kwargs now when running embedded in server. + +v2.0.1 +====== + +Bugfixes +-------- + +- The profiler is now properly initialized. +- Use the argument_spec values to determine which option should actually be used. +- fix exception messages containing extra single quotes (https://github.com/ansible-collections/cloud.common/pull/46). + +v2.0.0 +====== + +Minor Changes +------------- + +- The ``EmbeddedModuleFailure`` and ``EmbeddedModuleUnexpectedFailure`` exceptions now handle the ``__repr__`` and ``__str__`` method. This means Python is able to print a meaningful output. +- The modules must now set the ``collection_name`` of the ``AnsibleTurboModule`` class. The content of this attribute is used to build the path of the UNIX socket. +- When the background service is started in a console without the ``--daemon`` flag, it now prints information what it runs. +- ``argument_spec`` is now evaluated server-side. +- fail_json now accept and collect extra named arguments. +- raise an exception if the output of module execution cannot be parsed. +- the ``turbo_demo`` module now return the value of counter. +- the user get an error now an error if a module don't raise ``exit_json()`` or ``fail_json()``. + +Bugfixes +-------- + +- the debug mode now work as expected. The ``_ansible_*`` variables are properly passed to the module. + +v1.1.0 +====== + +Minor Changes +------------- + +- ansible_module.turbo - the cache is now associated with the collection, if two collections use a cache, two background services will be started. 
+ +Bugfixes +-------- + +- Ensure the background service starts properly on MacOS (https://github.com/ansible-collections/cloud.common/pull/16) +- do not silently skip parameters when the value is ``False`` + +v1.0.2 +====== diff --git a/ansible_collections/cloud/common/FILES.json b/ansible_collections/cloud/common/FILES.json new file mode 100644 index 00000000..4fa7d309 --- /dev/null +++ b/ansible_collections/cloud/common/FILES.json @@ -0,0 +1,460 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a702746a35bb8e74746535d9eca679e9e73e9e3ee0df884f3294c6b884604543", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ed2dc9b735615a898401d871695d090d74624a617d80f7bde0b59abd79fae6e", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup/turbo_demo.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "36a1cac200d74a970d78687b8ce86c3e18c201f6bb4d9da7d1c4b5448f822eda", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/turbo", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/turbo/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cb8fa84023f04361a4e2c4fc87ab381e5b238772ab34ec0b258a0fcc2868e14", + "format": 1 + }, + { + "name": "plugins/module_utils/turbo/exceptions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e897e6a947d1d9971b1a0e10694caeba7f6c52af69e5e8d7f32d4648362185", + "format": 1 + }, + { + "name": "plugins/module_utils/turbo/module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4489bf197e29debb7b46d3ebbd306e5feec859a94f7f18ff87f73b15fe562801", + "format": 1 + }, + { + "name": "plugins/module_utils/turbo/server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b128c3f4641ee531bbbf91d46760a89fd3bea6fb48d191478c0160b53bef81c", + "format": 1 + }, + { + "name": "plugins/module_utils/turbo_demo.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7401a0c8d754a4f09267b95ba7a898b77c2b5b2422341966d83ec81fe406e1bb", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/turbo_demo.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c9439d9544ca0b74e0800c257f3472f59e85f65f0fd55a2b0d8c8ee82e674fd", + "format": 1 + }, + { + "name": "plugins/modules/turbo_fail.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fadd01d905bf6bf22d8d8e59e66f30302afb419af84fed0b385b479da5a2db8", + "format": 1 + }, + { + "name": "plugins/modules/turbo_import.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "a68d01cddbbe3254294930452900cc648ce1cdc0d75d6fde1f57f718e0d8e8ac", + "format": 1 + }, + { + "name": "plugins/plugin_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/plugin_utils/turbo", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/plugin_utils/turbo/lookup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "330d51d32974472b1b7bd3195eca2c43d93721b9cae4000e64cf25b71787a862", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_fail", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_fail/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_fail/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ed77ad416a544c74f1109f5fbb190de18ffb324cd10659d529f88d816a49bd5", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_lookup/playbook.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "042e219620e78890f28f1dd5fd03e1b3df37b247abc7302e64ff5416f8877c27", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_lookup/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"74c081ba1aa13beb5aa8e4a29a60967eeaa5fdaff101ca722614e45c7d2d24c0", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c3c7ea5adfd62a5de0c5c03e4ac6d533086972fa13e3da551142831e7dc1af8", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode/playbook.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b2291f46c8ee017e0f368e4d35e0e3293a161a5e01c5d04c384dc3432a417eb", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16f894cf82c43e1a5a569d3f80c8f3a6db2bd27aa29144be3c1f992c1d771d2a", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode_parallel_exec", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode_parallel_exec/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecae869ac5b55848215776b0da1fc1b5e659116c008e3c0091f2d7917f2d22db", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode_parallel_exec/playbook.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0a754bfa0b4cfe2faff8429ce619049cc729127c6b8ede85d174f5d52a810eb", + "format": 1 + }, + { + "name": "tests/integration/targets/turbo_mode_parallel_exec/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32f07f09fdc38e292cbd2f1919b06fe0c1de8cd49beded2b1484387b03b774cf", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bbc6b24b90efc9dac8b6fd818c5041d0b9d253eec3cb4497a20477eac30a168", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfe428494d19779e919bb4835cf4d443e85274718eea1cc821beafea6e0f803b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "959bcc20c3e5bbd453b53a431b464dfa89080130a9f69d74f01900c9ee29a958", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a94d845075a789f0a27517c787b24b9f9f30056639ae0c014e72ece4f2e1494", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb4341e4a139c3dbb3e4f001ccafbf624c30c7c3a78304bdd99414a91ed16dfd", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bbc6b24b90efc9dac8b6fd818c5041d0b9d253eec3cb4497a20477eac30a168", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_turbo_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d3b03bc47f212e818291b4eff7e925bd296ebb0028134856a33c0570e532f5f", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/turbo", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/turbo/conftest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99a3352cf6215e0a3d767c048fbfefa66aec5cead1330b0b029975e019faeb8c", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/turbo/test_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b7ed86bd31bd4bf172cf9e8b6d1c8715bd0d6128713e6b59418b55ad33ad417", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4956e1dbd41900d08246d8d019954340c669abe02f1f2680f435c63a1534d5a6", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "146f4793aa322ed58a5c2acf2991e798533987aec8a0d488a718f0e343154f12", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c96e9d9f062dcf148633ea985bfce3aca104cf2d1905ad68bfe8372a9f187b5e", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2d896f892cafcbf0b9abb0739c94ec8c4e292b56525a9a9aef688bf5b02c096", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0972358b763f92104bcf41a244be29f2098df719ae93d75054cf06d56b7514a7", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/cloud/common/LICENSE b/ansible_collections/cloud/common/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/ansible_collections/cloud/common/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/ansible_collections/cloud/common/MANIFEST.json b/ansible_collections/cloud/common/MANIFEST.json new file mode 100644 index 00000000..2ca79f8a --- /dev/null +++ b/ansible_collections/cloud/common/MANIFEST.json @@ -0,0 +1,31 @@ +{ + "collection_info": { + "namespace": "cloud", + "name": "common", + "version": "2.1.2", + "authors": [ + "Ansible (https://github.com/ansible)" + ], + "readme": "README.md", + "tags": [ + "cloud", + "virtualization" + ], + "description": "Set of common files for the cloud collections", + "license": [], + "license_file": "LICENSE", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/cloud.common", + "documentation": null, + "homepage": "https://github.com/ansible-collections/cloud.common", + "issues": "https://github.com/ansible-collections/cloud.common/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42429192211a282832a2d1fdc402672c4b93705b12027725b2b9813b3abc0608", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/cloud/common/README.md b/ansible_collections/cloud/common/README.md new file mode 100644 index 00000000..b45e19f2 --- /dev/null +++ b/ansible_collections/cloud/common/README.md @@ -0,0 +1,200 @@ +# cloud.common + +This collection is a library for the cloud modules. It's the home of the following component: + +- ansible_turbo.module: a cache sharing solution to speed up Ansible modules + +More content may be included later. + +# Requirements + +- ansible_turbo.module requires Python 3.6 and Ansible 2.9 or greater. + +## Ansible Turbo Module + +### Current situation + +The traditional execution flow of an Ansible module includes +the following steps: + +- Upload of a ZIP archive with the module and its dependencies +- Execution of the module, which is just a Python script +- Ansible collects the results once the script is finished + +These steps happen for each task of a playbook, and on every host. + +Most of the time, the execution of a module is fast enough for +the user. However, sometime the module requires an important +amount of time, just to initialize itself. This is a common +situation with the API based modules. A classic initialization +involves the following steps: + +- Load a Python library to access the remote resource (via SDK) +- Open a client + - Load a bunch of Python modules. + - Request a new TCP connection. + - Create a session. + - Authenticate the client. + +All these steps are time consuming and the same operations +will be running again and again. + +For instance, here: + +- `import openstack`: takes 0.569s +- `client = openstack.connect()`: takes 0.065s +- `client.authorize()`: takes 1.360s + +These numbers are from test ran against VexxHost public cloud. + +In this case, it's a 2s-ish overhead per task. If the playbook +comes with 10 tasks, the execution time cannot go below 20s. 
+
+### How Ansible Turbo Module improves the situation
+
+`AnsibleTurboModule` is actually a class that inherits from
+the standard `AnsibleModule` class that your modules probably
+already use.
+The big difference is that when a module starts, it also spawns
+a little Python daemon. If a daemon already exists, it will just
+reuse it.
+All the module logic is run inside this Python daemon. This means:
+
+- Python modules are actually loaded one time
+- Ansible module can reuse an existing authenticated session.
+
+### How can I enable `AnsibleTurboModule`?
+
+If you are a collection maintainer and want to enable `AnsibleTurboModule`, you can
+follow these steps.
+Your module should inherit from `AnsibleTurboModule`, instead of `AnsibleModule`.
+
+```python
+
+    from ansible_module.turbo.module import AnsibleTurboModule as AnsibleModule
+
+```
+
+You can also use the `functools.lru_cache()` decorator to ask Python to cache
+the result of an operation, like a network session creation.
+
+Finally, if some of the dependent libraries are large, it may be nice
+to defer your module imports, and do the loading AFTER the
+`AnsibleTurboModule` instance creation.
+
+### Example
+
+The Ansible module is slightly different while using AnsibleTurboModule.
+Here are some examples with OpenStack and VMware.
+
+These examples use `functools.lru_cache`, which has been in the Python standard library since 3.3.
+The `lru_cache()` decorator will manage the cache. It uses the function parameters
+as the uniqueness criteria.
+
+- Integration with OpenStack Collection: https://github.com/goneri/ansible-collections-openstack/commit/53ce9860bb84eeab49a46f7a30e3c9588d53e367
+- Integration with VMware Collection: https://github.com/goneri/vmware/commit/d1c02b93cbf899fde3a4665e6bcb4d7531f683a3
+- Integration with Kubernetes Collection: https://github.com/ansible-collections/kubernetes.core/pull/68
+
+### Demo
+
+In this demo, we run one playbook that does several `os_keypair`
+calls. 
The first time, we run the regular Ansible module.
+The second time, we run the same playbook, but with the modified
+version.
+
+
+[![asciicast](https://asciinema.org/a/329481.png)](https://asciinema.org/a/329481)
+
+
+### The background service
+
+The daemon kills itself after 15s, and communication is done
+through a Unix socket.
+It runs in one single process and uses `asyncio` internally.
+Consequently, you can use the `async` keyword in your Ansible module.
+This will be handy if you interact with a lot of remote systems
+at the same time.
+
+### Security impact
+
+`ansible_module.turbo` opens a Unix socket to interact with the background service.
+We use this service to open the connections toward the different target systems.
+
+This is similar to what SSH does with the sockets.
+
+Keep in mind that:
+
+- All the modules can access the same cache. Soon an isolation will be done at the collection level (https://github.com/ansible-collections/cloud.common/pull/17)
+- A task can load a different version of a library and impact the next tasks.
+- If the same user runs two `ansible-playbook` commands at the same time, they will have access to the same cache.
+
+When a module stores a session in a cache, it's a good idea to use a hash of the authentication information to identify the session.
+
+.. note:: You may want to isolate your Ansible environment in a container, in this case you can consider https://github.com/ansible/ansible-builder
+
+### Error management
+
+`ansible_module.turbo` uses exceptions to communicate a result back to the module.
+
+- `EmbeddedModuleFailure` is raised when `fail_json()` is called.
+- `EmbeddedModuleSuccess` is raised in case of success and returns the result to the origin module process.
+
+These exceptions are defined in `ansible_collections.cloud.common.plugins.module_utils.turbo.exceptions`.
+You can raise the `EmbeddedModuleFailure` exception yourself, for instance from a module in `module_utils`. 
+
+Be careful with catch-all exception handlers (`except Exception:`). Not only are they bad practice, but they may also interfere with this mechanism.
+
+### Troubleshooting
+
+You may want to manually start the server. This can be done with the following command:
+
+.. code-block:: shell
+
+   PYTHONPATH=$HOME/.ansible/collections python -m ansible_collections.cloud.common.plugins.module_utils.turbo.server --socket-path $HOME/.ansible/tmp/turbo_mode.foo.bar.socket
+
+Replace `foo.bar` with the name of the collection.
+
+You can use the `--help` argument to get a list of the optional parameters.
+
+
+## More information
+
+<!-- List out where the user can find additional information, such as working group meeting times, slack/IRC channels, or documentation for the product this collection automates. At a minimum, link to: -->
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
+- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
+- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)
+
+
+## Release notes
+
+See [CHANGELOG.rst](https://github.com/ansible-collections/cloud.common/blob/main/CHANGELOG.rst).
+
+## Releasing, Versioning and Deprecation
+
+This collection follows [Semantic Versioning](https://semver.org/). More details on versioning can be found [in the Ansible docs](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections.html#collection-versions).
+
+We plan to regularly release new minor or bugfix versions once new features or bugfixes have been implemented. 
+ +Releasing happens by tagging the `main` branch. + +## Contributing to this collection + +We welcome community contributions to this collection. If you find problems, please open an issue or create a PR against the [Cloud.Common collection repository](https://github.com/ansible-collections/cloud.common). + +## Code of Conduct + +We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project. + +If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. + +## Licensing + +<!-- Include the appropriate license information here and a pointer to the full licensing details. If the collection contains modules migrated from the ansible/ansible repo, you must use the same license that existed in the ansible/ansible repo. See the GNU license example below. --> + +GNU General Public License v3.0 or later. + +See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. diff --git a/ansible_collections/cloud/common/changelogs/changelog.yaml b/ansible_collections/cloud/common/changelogs/changelog.yaml new file mode 100644 index 00000000..1456af0c --- /dev/null +++ b/ansible_collections/cloud/common/changelogs/changelog.yaml @@ -0,0 +1,132 @@ +ancestor: null +releases: + 1.0.2: + release_date: '2020-10-06' + 1.1.0: + changes: + bugfixes: + - Ensure the background service starts properly on MacOS (https://github.com/ansible-collections/cloud.common/pull/16) + - do not silently skip parameters when the value is ``False`` + minor_changes: + - ansible_module.turbo - the cache is now associated with the collection, if + two collections use a cache, two background services will be started. 
+ fragments: + - dont_ignore_parameters_when_val_is_False.yaml + - macos_fix.yaml + - one_cache_per_collection.yaml + release_date: '2020-10-23' + 2.0.0: + changes: + bugfixes: + - the debug mode now work as expected. The ``_ansible_*`` variables are properly + passed to the module. + minor_changes: + - The ``EmbeddedModuleFailure`` and ``EmbeddedModuleUnexpectedFailure`` exceptions + now handle the ``__repr__`` and ``__str__`` method. This means Python is able + to print a meaningful output. + - The modules must now set the ``collection_name`` of the ``AnsibleTurboModule`` + class. The content of this attribute is used to build the path of the UNIX + socket. + - When the background service is started in a console without the ``--daemon`` + flag, it now prints information what it runs. + - '``argument_spec`` is now evaluated server-side.' + - fail_json now accept and collect extra named arguments. + - raise an exception if the output of module execution cannot be parsed. + - the ``turbo_demo`` module now return the value of counter. + - the user get an error now an error if a module don't raise ``exit_json()`` + or ``fail_json()``. + fragments: + - Set-the-_ansible_-variables_13334.yaml + - argument_spec_server_side.yaml + - exception_returns_a_printable_content.yaml + - fail_json_accept_extra_kwargs.yaml + - fork_mode_print_information_about_the_module.yaml + - get_the_collection_name_from_an_attribute.yaml + - improve-the-demo-module_5397.yaml + - raise-an-error-if-exit_json-or-fail_json-not-called_13453.yaml + - raise_exception_if_output_parsing_fails.yaml + release_date: '2021-04-20' + 2.0.1: + changes: + bugfixes: + - The profiler is now properly initialized. + - Use the argument_spec values to determine which option should actually be + used. + - fix exception messages containing extra single quotes (https://github.com/ansible-collections/cloud.common/pull/46). 
+ fragments: + - 46-fix-error-message-string.yaml + - actually_enable_the_profiler.yaml + - filter_argument_with_argument_spec.yaml + release_date: '2021-04-22' + 2.0.2: + changes: + bugfixes: + - Introduces a fix for the future Python 3.10 (#53) + - fail_json method should honor kwargs now when running embedded in server. + fragments: + - modulefailure_fix_result.yaml + - py3.10-fix.yaml + release_date: '2021-06-02' + 2.0.3: + changes: + bugfixes: + - Introduces a fix for the future Python 3.10 (#53) + - turbo - make sure socket doesn't close prematurely, preventing issues with + large amounts of data passed as module parameters (https://github.com/ansible-collections/cloud.common/issues/61) + fragments: + - py3.10-fix.yaml + - socket-closure-fix.yaml + release_date: '2021-06-22' + 2.0.4: + changes: + bugfixes: + - add exception handler to main async loop (https://github.com/ansible-collections/cloud.common/pull/67). + - pass current task's environment through to execution (https://github.com/ansible-collections/cloud.common/pull/69). + - turbo - AnsibleTurboModule was missing some _ansible_facts variable like _diff, + _ansible_tmpdir. (https://github.com/ansible-collections/cloud.common/issues/65) + - turbo - honor the ``remote_tmp`` configuration key. + major_changes: + - turbo - enable turbo mode for lookup plugins + fragments: + - 67-add-exception-handler.yaml + - 69-pass-envvar.yaml + - Respect_the_remote_tmp_setting.yaml + - reading-common-variable.yaml + - turbo-for-lookup-plugin.yaml + release_date: '2021-07-29' + 2.1.0: + changes: + minor_changes: + - Cosmetic changes in the documentation for the inclusion in the Ansible collection. + - turbo - Extend the unit-test coverage. + - turbo - Use a BSD license for the module_utils and plugin_utils files. + - turbo - add support for coroutine for lookup plugins (https://github.com/ansible-collections/cloud.common/pull/75). 
+ fragments: + - 0-copy_ignore_txt.yml + - 75-lookup-add-support-for-coroutine.yaml + - cosmetic_changes.yaml + release_date: '2021-10-07' + 2.1.1: + changes: + bugfixes: + - fix parameters with aliases not being passed through (https://github.com/ansible-collections/cloud.common/issues/91). + - fix turbo mode loading incorrect module (https://github.com/ansible-collections/cloud.common/pull/102). + - turbo - Ensure we don't call the module with duplicated aliased parameters. + minor_changes: + - Move the content of README_ansible_turbo.module.rst in the main README.md + to get visibility on Ansible Galaxy. + fragments: + - 0-ignore.yml + - 102-fix-incorrect-module-loading.yaml + - 92-fix-params-with-aliases.yml + - remove_README_ansible_turbo.module.rst.yaml + - remove_aliased_parameters.yaml + release_date: '2022-04-11' + 2.1.2: + changes: + bugfixes: + - Ensure we don't shutdown the server when we've still got some ongoing tasks + (https://github.com/ansible-collections/cloud.common/pull/109). 
+ fragments: + - graceful_shutdown.yaml + release_date: '2022-06-23' diff --git a/ansible_collections/cloud/common/changelogs/config.yaml b/ansible_collections/cloud/common/changelogs/config.yaml new file mode 100644 index 00000000..6b49df02 --- /dev/null +++ b/ansible_collections/cloud/common/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: cloud.common +trivial_section_name: trivial diff --git a/ansible_collections/cloud/common/changelogs/fragments/.keep b/ansible_collections/cloud/common/changelogs/fragments/.keep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloud/common/changelogs/fragments/.keep diff --git a/ansible_collections/cloud/common/meta/runtime.yml b/ansible_collections/cloud/common/meta/runtime.yml new file mode 100644 index 00000000..2ee3c9fa --- /dev/null +++ b/ansible_collections/cloud/common/meta/runtime.yml @@ -0,0 +1,2 @@ +--- +requires_ansible: '>=2.9.10' diff --git a/ansible_collections/cloud/common/plugins/lookup/turbo_demo.py b/ansible_collections/cloud/common/plugins/lookup/turbo_demo.py new file mode 100644 index 00000000..88778cd6 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/lookup/turbo_demo.py @@ -0,0 +1,69 @@ +# Copyright: (c) 2021, Aubin Bikouo (@abikouo) +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: turbo_demo +author: + - Aubin Bikouo (@abikouo) + +short_description: A demo for lookup plugins on cloud.common +description: + - return the parent process of the running process +options: + playbook_vars: + description: list of playbook variables to add in the output. + type: list + elements: str +""" + +EXAMPLES = r""" +""" + +RETURN = r""" +""" + + +import os +import sys +import traceback + +from ansible_collections.cloud.common.plugins.plugin_utils.turbo.lookup import ( + TurboLookupBase as LookupBase, +) + + +def counter(): + counter.i += 1 + return counter.i + + +# NOTE: workaround to avoid a warning with ansible-doc +if True: # pylint: disable=using-constant-test + counter.i = 0 + + +async def execute(terms, variables, playbook_vars): + result = [] + result.append("running from pid: {pid}".format(pid=os.getpid())) + if playbook_vars is not None: + result += [ + variables["vars"].get(x) for x in playbook_vars if x in variables["vars"] + ] + if terms: + result += terms + + for id, stack in list(sys._current_frames().items()): + for fname, line_id, name, line in traceback.extract_stack(stack): + if fname == __file__: + continue + + result.append("turbo_demo_counter: {0}".format(counter())) + return result + + +class LookupModule(LookupBase): + async def _run(self, terms, variables=None, playbook_vars=None): + result = await execute(terms, variables, playbook_vars) + return result + + run = _run if not hasattr(LookupBase, "run_on_daemon") else LookupBase.run_on_daemon diff --git a/ansible_collections/cloud/common/plugins/module_utils/turbo/common.py b/ansible_collections/cloud/common/plugins/module_utils/turbo/common.py new file mode 100644 index 00000000..e5ad1938 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/module_utils/turbo/common.py @@ -0,0 +1,125 @@ +# Copyright (c) 2021 Red Hat +# +# This code is part of Ansible, but is an independent component. 
+# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +import os +import socket +import sys +import time +import subprocess +import pickle +from contextlib import contextmanager +import json + +from .exceptions import ( + EmbeddedModuleUnexpectedFailure, +) + + +class AnsibleTurboSocket: + def __init__(self, socket_path, ttl=None, plugin="module"): + self._socket_path = socket_path + self._ttl = ttl + self._plugin = plugin + self._socket = None + + def bind(self): + running = False + self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + for attempt in range(100, -1, -1): + try: + self._socket.connect(self._socket_path) + return True + except (ConnectionRefusedError, FileNotFoundError): + if not running: + running = self.start_server() + if attempt == 0: + raise + time.sleep(0.01) + + def start_server(self): + env = os.environ + parameters = [ + "--fork", + "--socket-path", + self._socket_path, + ] + + if self._ttl: + parameters += ["--ttl", str(self._ttl)] + + command = [sys.executable] + if self._plugin == "module": + ansiblez_path = sys.path[0] + env.update({"PYTHONPATH": ansiblez_path}) + command += [ + "-m", + "ansible_collections.cloud.common.plugins.module_utils.turbo.server", + ] + else: + parent_dir = os.path.dirname(__file__) + server_path = os.path.join(parent_dir, "server.py") + command += [server_path] + p = subprocess.Popen( + command + parameters, + env=env, + close_fds=True, + ) + p.communicate() + return p.pid + + def communicate(self, data, wait_sleep=0.01): + encoded_data = pickle.dumps((self._plugin, data)) + self._socket.sendall(encoded_data) + self._socket.shutdown(socket.SHUT_WR) + raw_answer = b"" + while True: + b = self._socket.recv((1024 * 1024)) + if not b: + break + raw_answer += b + time.sleep(wait_sleep) + try: + result = json.loads(raw_answer.decode()) + return result + except json.decoder.JSONDecodeError: + raise EmbeddedModuleUnexpectedFailure( + "Cannot decode plugin answer: {0}".format(raw_answer) + ) + + def close(self): + if self._socket: + self._socket.close() + + 
+@contextmanager +def connect(socket_path, ttl=None, plugin="module"): + turbo_socket = AnsibleTurboSocket(socket_path=socket_path, ttl=ttl, plugin=plugin) + try: + turbo_socket.bind() + yield turbo_socket + finally: + turbo_socket.close() diff --git a/ansible_collections/cloud/common/plugins/module_utils/turbo/exceptions.py b/ansible_collections/cloud/common/plugins/module_utils/turbo/exceptions.py new file mode 100644 index 00000000..acad2cba --- /dev/null +++ b/ansible_collections/cloud/common/plugins/module_utils/turbo/exceptions.py @@ -0,0 +1,65 @@ +# Copyright (c) 2021 Red Hat +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + + +class EmbeddedModuleFailure(Exception): + def __init__(self, msg, **kwargs): + self._message = msg + self._kwargs = kwargs + + def get_message(self): + return self._message + + @property + def kwargs(self): + return self._kwargs + + def __repr__(self): + return repr(self.get_message()) + + def __str__(self): + return str(self.get_message()) + + +class EmbeddedModuleUnexpectedFailure(Exception): + def __init__(self, msg): + self._message = msg + + def get_message(self): + return self._message + + def __repr__(self): + return repr(self.get_message()) + + def __str__(self): + return str(self.get_message()) + + +class EmbeddedModuleSuccess(Exception): + def __init__(self, **kwargs): + self.kwargs = kwargs diff --git a/ansible_collections/cloud/common/plugins/module_utils/turbo/module.py b/ansible_collections/cloud/common/plugins/module_utils/turbo/module.py new file mode 100644 index 00000000..c2f9d667 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/module_utils/turbo/module.py @@ -0,0 +1,169 @@ +# Copyright (c) 2021 Red Hat +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +import json +import os +import os.path +import sys +import tempfile + +import ansible.module_utils.basic +from .exceptions import ( + EmbeddedModuleSuccess, + EmbeddedModuleFailure, +) +import ansible_collections.cloud.common.plugins.module_utils.turbo.common + +if False: # pylint: disable=using-constant-test + from .server import please_include_me + + # This is a trick to be sure server.py is embedded in the Ansiblez + # zip archive.🥷 + please_include_me + + +def get_collection_name_from_path(): + module_path = ansible.module_utils.basic.get_module_path() + + ansiblez = module_path.split("/")[-3] + if ansiblez.startswith("ansible_") and ansiblez.endswith(".zip"): + return ".".join(ansiblez[8:].split(".")[:2]) + + +def expand_argument_specs_aliases(argument_spec): + """Returns a dict of accepted argument that includes the aliases""" + expanded_argument_specs = {} + for k, v in argument_spec.items(): + for alias in [k] + v.get("aliases", []): + expanded_argument_specs[alias] = v + return expanded_argument_specs + + +def prepare_args(argument_specs, params): + """Take argument_spec and the user params and prepare the final argument structure.""" + + def _keep_value(v, argument_specs, key, subkey=None): + if v is None: # cannot be a valide parameter + return False + if key not in argument_specs: # should never happen + return + if not subkey: # level 1 parameter + return v != argument_specs[key].get("default") + elif subkey not in argument_specs[key]: # Freeform + return True + elif isinstance(argument_specs[key][subkey], dict): + return v != argument_specs[key][subkey].get("default") + else: # should never happen + return True + + def _is_an_alias(k): + aliases = argument_specs[k].get("aliases") + return aliases and k in aliases + + new_params = {} + for k, v in params.items(): + if not _keep_value(v, argument_specs, k): + continue + + if _is_an_alias(k): + continue + + if isinstance(v, dict): + new_params[k] = { + i: j for i, j in v.items() if _keep_value(j, 
argument_specs, k, i) + } + else: + new_params[k] = v + args = {"ANSIBLE_MODULE_ARGS": new_params} + return args + + +class AnsibleTurboModule(ansible.module_utils.basic.AnsibleModule): + embedded_in_server = False + collection_name = None + + def __init__(self, *args, **kwargs): + self.embedded_in_server = sys.argv[0].endswith("/server.py") + self.collection_name = ( + AnsibleTurboModule.collection_name or get_collection_name_from_path() + ) + ansible.module_utils.basic.AnsibleModule.__init__( + self, *args, bypass_checks=not self.embedded_in_server, **kwargs + ) + self._running = None + if not self.embedded_in_server: + self.run_on_daemon() + + def socket_path(self): + if self._remote_tmp is None: + abs_remote_tmp = tempfile.gettempdir() + else: + abs_remote_tmp = os.path.expanduser(os.path.expandvars(self._remote_tmp)) + return os.path.join(abs_remote_tmp, f"turbo_mode.{self.collection_name}.socket") + + def init_args(self): + argument_specs = expand_argument_specs_aliases(self.argument_spec) + args = prepare_args(argument_specs, self.params) + for k in ansible.module_utils.basic.PASS_VARS: + attribute = ansible.module_utils.basic.PASS_VARS[k][0] + if not hasattr(self, attribute): + continue + v = getattr(self, attribute) + if isinstance(v, int) or isinstance(v, bool) or isinstance(v, str): + args["ANSIBLE_MODULE_ARGS"][f"_ansible_{k}"] = v + return args + + def run_on_daemon(self): + result = dict(changed=False, original_message="", message="") + ttl = os.environ.get("ANSIBLE_TURBO_LOOKUP_TTL", None) + with ansible_collections.cloud.common.plugins.module_utils.turbo.common.connect( + socket_path=self.socket_path(), ttl=ttl + ) as turbo_socket: + ansiblez_path = sys.path[0] + args = self.init_args() + data = [ + ansiblez_path, + json.dumps(args), + dict(os.environ), + ] + content = json.dumps(data).encode() + result = turbo_socket.communicate(content) + self.exit_json(**result) + + def exit_json(self, **kwargs): + if not self.embedded_in_server: + 
super().exit_json(**kwargs) + else: + self.do_cleanup_files() + raise EmbeddedModuleSuccess(**kwargs) + + def fail_json(self, *args, **kwargs): + if not self.embedded_in_server: + super().fail_json(**kwargs) + else: + self.do_cleanup_files() + raise EmbeddedModuleFailure(*args, **kwargs) diff --git a/ansible_collections/cloud/common/plugins/module_utils/turbo/server.py b/ansible_collections/cloud/common/plugins/module_utils/turbo/server.py new file mode 100644 index 00000000..028110c2 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/module_utils/turbo/server.py @@ -0,0 +1,395 @@ +# Copyright (c) 2021 Red Hat +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import argparse +import asyncio +from datetime import datetime +import importlib + +# py38 only, See: https://github.com/PyCQA/pylint/issues/2976 +import inspect # pylint: disable=syntax-error +import io +import json + +# py38 only, See: https://github.com/PyCQA/pylint/issues/2976 +import collections # pylint: disable=syntax-error +import os +import signal +import sys +import traceback +import zipfile +from zipimport import zipimporter +import pickle +import uuid + +sys_path_lock = None +env_lock = None + +import ansible.module_utils.basic + +please_include_me = "bar" + + +def fork_process(): + """ + This function performs the double fork process to detach from the + parent process and execute. 
+ """ + pid = os.fork() + + if pid == 0: + fd = os.open(os.devnull, os.O_RDWR) + + # clone stdin/out/err + for num in range(3): + if fd != num: + os.dup2(fd, num) + + if fd not in range(3): + os.close(fd) + + pid = os.fork() + if pid > 0: + os._exit(0) + + # get new process session and detach + sid = os.setsid() + if sid == -1: + raise Exception("Unable to detach session while daemonizing") + + # avoid possible problems with cwd being removed + os.chdir("/") + + pid = os.fork() + if pid > 0: + sys.exit(0) # pylint: disable=ansible-bad-function + else: + sys.exit(0) # pylint: disable=ansible-bad-function + return pid + + +class EmbeddedModule: + def __init__(self, ansiblez_path, params): + self.ansiblez_path = ansiblez_path + self.collection_name, self.module_name = self.find_module_name() + self.params = params + self.module_class = None + self.debug_mode = False + self.module_path = ( + "ansible_collections.{collection_name}." "plugins.modules.{module_name}" + ).format(collection_name=self.collection_name, module_name=self.module_name) + + def find_module_name(self): + with zipfile.ZipFile(self.ansiblez_path) as zip: + for path in zip.namelist(): + if not path.startswith("ansible_collections"): + continue + if not path.endswith(".py"): + continue + if path.endswith("__init__.py"): + continue + splitted = path.split("/") + if len(splitted) != 6: + continue + if splitted[-3:-1] != ["plugins", "modules"]: + continue + collection = ".".join(splitted[1:3]) + name = splitted[-1][:-3] + return collection, name + raise Exception("Cannot find module name") + + async def load(self): + async with sys_path_lock: + # Add the Ansiblez_path in sys.path + sys.path.insert(0, self.ansiblez_path) + + # resettle the loaded modules that were associated + # with a different Ansiblez. 
+ for path, module in sorted(tuple(sys.modules.items())): + if path and module and path.startswith("ansible_collections"): + try: + prefix = sys.modules[path].__loader__.prefix + except AttributeError: + # Not from a zipimporter loader, skipping + continue + # Reload package modules only, to pick up new modules from + # packages that have been previously loaded. + if hasattr(sys.modules[path], "__path__"): + py_path = self.ansiblez_path + os.sep + prefix + my_loader = zipimporter(py_path) + sys.modules[path].__loader__ = my_loader + try: + importlib.reload(sys.modules[path]) + except ModuleNotFoundError: + pass + # Finally, load the plugin class. + self.module_class = importlib.import_module(self.module_path) + + async def unload(self): + async with sys_path_lock: + sys.path = [i for i in sys.path if i != self.ansiblez_path] + + def create_profiler(self): + if self.debug_mode: + import cProfile + + pr = cProfile.Profile() + pr.enable() + return pr + + def print_profiling_info(self, pr): + if self.debug_mode: + import pstats + + sortby = pstats.SortKey.CUMULATIVE + ps = pstats.Stats(pr).sort_stats(sortby) + ps.print_stats(20) + + def print_backtrace(self, backtrace): + if self.debug_mode: + print(backtrace) # pylint: disable=ansible-bad-function + + async def run(self): + class FakeStdin: + buffer = None + + from .exceptions import ( + EmbeddedModuleFailure, + EmbeddedModuleUnexpectedFailure, + EmbeddedModuleSuccess, + ) + + # monkeypatching to pass the argument to the module, this is not + # really safe, and in the future, this will prevent us to run several + # modules in parallel. 
We can maybe use a scoped monkeypatch instead + _fake_stdin = FakeStdin() + _fake_stdin.buffer = io.BytesIO(self.params.encode()) + sys.stdin = _fake_stdin + # Trick to be sure ansible.module_utils.basic._load_params() won't + # try to build the module parameters from the daemon arguments + sys.argv = sys.argv[:1] + ansible.module_utils.basic._ANSIBLE_ARGS = None + pr = self.create_profiler() + if not hasattr(self.module_class, "main"): + raise EmbeddedModuleFailure("No main() found!") + try: + if inspect.iscoroutinefunction(self.module_class.main): + await self.module_class.main() + elif pr: + pr.runcall(self.module_class.main) + else: + self.module_class.main() + except EmbeddedModuleSuccess as e: + self.print_profiling_info(pr) + return e.kwargs + except EmbeddedModuleFailure as e: + backtrace = traceback.format_exc() + self.print_backtrace(backtrace) + raise + except Exception as e: + backtrace = traceback.format_exc() + self.print_backtrace(backtrace) + raise EmbeddedModuleUnexpectedFailure(str(backtrace)) + else: + raise EmbeddedModuleUnexpectedFailure( + "Likely a bug: exit_json() or fail_json() should be called during the module excution" + ) + + +async def run_as_lookup_plugin(data): + errors = None + try: + import ansible.plugins.loader as plugin_loader + from ansible.parsing.dataloader import DataLoader + from ansible.template import Templar + from ansible.module_utils._text import to_native + + ( + lookup_name, + terms, + variables, + kwargs, + ) = data + + # load lookup plugin + templar = Templar(loader=DataLoader(), variables=None) + ansible_collections = "ansible_collections." + if lookup_name.startswith(ansible_collections): + lookup_name = lookup_name.replace(ansible_collections, "", 1) + ansible_plugins_lookup = ".plugins.lookup." 
+ if ansible_plugins_lookup in lookup_name: + lookup_name = lookup_name.replace(ansible_plugins_lookup, ".", 1) + + instance = plugin_loader.lookup_loader.get( + name=lookup_name, loader=templar._loader, templar=templar + ) + + if not hasattr(instance, "_run"): + return [None, "No _run() found"] + if inspect.iscoroutinefunction(instance._run): + result = await instance._run(terms, variables=variables, **kwargs) + else: + result = instance._run(terms, variables=variables, **kwargs) + except Exception as e: + errors = to_native(e) + return [result, errors] + + +async def run_as_module(content, debug_mode): + from ansible_collections.cloud.common.plugins.module_utils.turbo.exceptions import ( + EmbeddedModuleFailure, + ) + + try: + ( + ansiblez_path, + params, + env, + ) = json.loads(content) + if debug_mode: + print( # pylint: disable=ansible-bad-function + f"-----\nrunning {ansiblez_path} with params: ¨{params}¨" + ) + + embedded_module = EmbeddedModule(ansiblez_path, params) + if debug_mode: + embedded_module.debug_mode = True + + await embedded_module.load() + try: + async with env_lock: + os.environ.clear() + os.environ.update(env) + result = await embedded_module.run() + except SystemExit: + backtrace = traceback.format_exc() + result = {"msg": str(backtrace), "failed": True} + except EmbeddedModuleFailure as e: + result = {"msg": str(e), "failed": True} + if e.kwargs: + result.update(e.kwargs) + except Exception as e: + result = { + "msg": traceback.format_stack() + [str(e)], + "failed": True, + } + await embedded_module.unload() + except Exception as e: + result = {"msg": traceback.format_stack() + [str(e)], "failed": True} + return result + + +class AnsibleVMwareTurboMode: + def __init__(self): + self.sessions = collections.defaultdict(dict) + self.socket_path = None + self.ttl = None + self.debug_mode = None + self.jobs_ongoing = {} + + async def ghost_killer(self): + while True: + await asyncio.sleep(self.ttl) + running_jobs = { + job_id: start_date + for 
job_id, start_date in self.jobs_ongoing.items() + if (datetime.now() - start_date).total_seconds() < 3600 + } + if running_jobs: + continue + self.stop() + + async def handle(self, reader, writer): + self._watcher.cancel() + self._watcher = self.loop.create_task(self.ghost_killer()) + job_id = str(uuid.uuid4()) + self.jobs_ongoing[job_id] = datetime.now() + raw_data = await reader.read() + if not raw_data: + return + + (plugin_type, content) = pickle.loads(raw_data) + + def _terminate(result): + writer.write(json.dumps(result).encode()) + writer.close() + + if plugin_type == "module": + result = await run_as_module(content, debug_mode=self.debug_mode) + elif plugin_type == "lookup": + result = await run_as_lookup_plugin(content) + _terminate(result) + del self.jobs_ongoing[job_id] + + def handle_exception(self, loop, context): + e = context.get("exception") + traceback.print_exception(type(e), e, e.__traceback__) + self.stop() + + def start(self): + self.loop = asyncio.get_event_loop() + self.loop.add_signal_handler(signal.SIGTERM, self.stop) + self.loop.set_exception_handler(self.handle_exception) + self._watcher = self.loop.create_task(self.ghost_killer()) + + import sys + + if sys.hexversion >= 0x30A00B1: + # py3.10 drops the loop argument of create_task. 
+ self.loop.create_task( + asyncio.start_unix_server(self.handle, path=self.socket_path) + ) + else: + self.loop.create_task( + asyncio.start_unix_server( + self.handle, path=self.socket_path, loop=self.loop + ) + ) + self.loop.run_forever() + + def stop(self): + os.unlink(self.socket_path) + self.loop.stop() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Start a background daemon.") + parser.add_argument("--socket-path") + parser.add_argument("--ttl", default=15, type=int) + parser.add_argument("--fork", action="store_true") + + args = parser.parse_args() + if args.fork: + fork_process() + sys_path_lock = asyncio.Lock() + env_lock = asyncio.Lock() + + server = AnsibleVMwareTurboMode() + server.socket_path = args.socket_path + server.ttl = args.ttl + server.debug_mode = not args.fork + server.start() diff --git a/ansible_collections/cloud/common/plugins/module_utils/turbo_demo.py b/ansible_collections/cloud/common/plugins/module_utils/turbo_demo.py new file mode 100644 index 00000000..1a14f075 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/module_utils/turbo_demo.py @@ -0,0 +1 @@ +# This module is part of the test suite to check the import logic of turbo mode diff --git a/ansible_collections/cloud/common/plugins/modules/turbo_demo.py b/ansible_collections/cloud/common/plugins/modules/turbo_demo.py new file mode 100644 index 00000000..30093b55 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/modules/turbo_demo.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (C) 2020, Gonéri Le Bouder <goneri@lebouder.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: turbo_demo +short_description: A demo module for ansible_module.turbo +version_added: "1.0.0" +description: +- "This module is an example of an ansible_module.turbo integration." 
+author: +- Gonéri Le Bouder (@goneri) +""" + +EXAMPLES = r""" +- name: Run the module + cloud.common.turbo_demo: +""" + +import os + +from ansible_collections.cloud.common.plugins.module_utils.turbo.module import ( + AnsibleTurboModule as AnsibleModule, +) + + +def counter(): + counter.i += 1 + return counter.i + + +# NOTE: workaround to avoid a warning with ansible-doc +if True: # pylint: disable=using-constant-test + counter.i = 0 + + +def get_message(): + return f"This is me running with PID: {os.getpid()}, called {counter.i} time(s)" + + +def run_module(): + result = {} + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule(argument_spec={}, supports_check_mode=True) + module.collection_name = "cloud.common" + previous_value = counter.i + if not module.check_mode: + counter() + result["changed"] = True + result["message"] = get_message() + result["counter"] = counter.i + result["envvar"] = os.environ.get("TURBO_TEST_VAR") + + if module._diff: + result["diff"] = {"before": previous_value, "after": counter.i} + + module.exit_json(**result) + + +def main(): + from ansible_collections.cloud.common.plugins.module_utils import turbo_demo + + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/cloud/common/plugins/modules/turbo_fail.py b/ansible_collections/cloud/common/plugins/modules/turbo_fail.py new file mode 100644 index 00000000..d9b4731f --- /dev/null +++ b/ansible_collections/cloud/common/plugins/modules/turbo_fail.py @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (C) 2021, Aubin Bikouo <abikouo> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: turbo_fail +short_description: A short module which honor additional 
args when calling fail_json +version_added: "1.0.0" +description: +- "This module aims to test fail_json method on Ansible.turbo module" +options: + params: + description: + - parameter to display in task output + required: false + type: dict +author: +- Aubin Bikouo (@abikouo) +""" + +EXAMPLES = r""" +- name: Fail without additional arguments + cloud.common.turbo_fail: + +- name: Fail with additional arguments + cloud.common.turbo_fail: + params: + test: "ansible" +""" + +import os + +from ansible_collections.cloud.common.plugins.module_utils.turbo.module import ( + AnsibleTurboModule as AnsibleModule, +) + + +def run_module(): + module = AnsibleModule( + argument_spec=dict( + params=dict(type="dict"), + ) + ) + module.collection_name = "cloud.common" + msg = "ansible.cloud.fail" + if module.params.get("params"): + module.fail_json(msg=msg, **module.params.get("params")) + module.fail_json(msg=msg) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/cloud/common/plugins/modules/turbo_import.py b/ansible_collections/cloud/common/plugins/modules/turbo_import.py new file mode 100644 index 00000000..152107c4 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/modules/turbo_import.py @@ -0,0 +1,46 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (C) 2022, Red Hat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: turbo_import +short_description: A demo module to test import logic for turbo mode +version_added: "1.0.0" +description: +- "This module tests the import logic for turbo mode." 
+author: +- Mike Graves (@gravesm) +""" + +EXAMPLES = r""" +- name: Run the module + cloud.common.turbo_import: +""" + + +from ansible_collections.cloud.common.plugins.module_utils.turbo.module import ( + AnsibleTurboModule as AnsibleModule, +) + + +def run_module(): + module = AnsibleModule(argument_spec={}) + module.collection_name = "cloud.common" + module.exit_json(changed=False) + + +def main(): + from ansible_collections.cloud.common.plugins.module_utils import turbo_demo + + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/cloud/common/plugins/plugin_utils/turbo/lookup.py b/ansible_collections/cloud/common/plugins/plugin_utils/turbo/lookup.py new file mode 100644 index 00000000..c3332333 --- /dev/null +++ b/ansible_collections/cloud/common/plugins/plugin_utils/turbo/lookup.py @@ -0,0 +1,91 @@ +# Copyright (c) 2021 Red Hat +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from ansible.plugins.lookup import LookupBase +from ansible.module_utils.six import string_types +import ansible_collections.cloud.common.plugins.module_utils.turbo.common +from ansible_collections.cloud.common.plugins.module_utils.turbo.exceptions import ( + EmbeddedModuleUnexpectedFailure, +) + + +def get_server_ttl(variables): + # trying to retrieve first TTL from environment variable + ttl = os.environ.get("ANSIBLE_TURBO_LOOKUP_TTL", None) + if ttl is not None: + return ttl + # Read TTL from ansible environment + for env_var in variables.get("environment", []): + value = env_var.get("ANSIBLE_TURBO_LOOKUP_TTL", None) + test_var_int = [ + isinstance(value, string_types) and value.isnumeric(), + isinstance(value, int), + ] + if value is not None and any(test_var_int): + ttl = value + return ttl + + +class TurboLookupBase(LookupBase): + def run_on_daemon(self, terms, variables=None, **kwargs): + self._ttl = get_server_ttl(variables) + return self.execute(terms=terms, variables=variables, **kwargs) + + @property + def socket_path(self): + if not hasattr(self, "__socket_path"): + """ + Input: + _load_name: ansible_collections.cloud.common.plugins.lookup.turbo_random_lookup + Output: + __socket_path: {HOME}/.ansible/tmp/turbo_mode_cloud.common.socket + this will allow to have one socket per collection + """ + name = self._load_name + ansible_collections = "ansible_collections." 
+ if name.startswith(ansible_collections): + name = name.replace(ansible_collections, "", 1) + lookup_plugins = ".plugins.lookup." + idx = name.find(lookup_plugins) + if idx != -1: + name = name[:idx] + self.__socket_path = os.environ[ + "HOME" + ] + "/.ansible/tmp/turbo_lookup.{0}.socket".format(name) + return self.__socket_path + + def execute(self, terms, variables=None, **kwargs): + with ansible_collections.cloud.common.plugins.module_utils.turbo.common.connect( + socket_path=self.socket_path, ttl=self._ttl, plugin="lookup" + ) as turbo_socket: + content = (self._load_name, terms, variables, kwargs) + (result, errors) = turbo_socket.communicate(content) + if errors: + raise EmbeddedModuleUnexpectedFailure(errors) + return result diff --git a/ansible_collections/cloud/common/requirements.txt b/ansible_collections/cloud/common/requirements.txt new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloud/common/requirements.txt diff --git a/ansible_collections/cloud/common/test-requirements.txt b/ansible_collections/cloud/common/test-requirements.txt new file mode 100644 index 00000000..ee6352ad --- /dev/null +++ b/ansible_collections/cloud/common/test-requirements.txt @@ -0,0 +1,2 @@ +coverage==4.5.4 +pytest-xdist diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_fail/tasks/main.yaml b/ansible_collections/cloud/common/tests/integration/targets/turbo_fail/tasks/main.yaml new file mode 100644 index 00000000..069ba449 --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_fail/tasks/main.yaml @@ -0,0 +1,24 @@ +- name: fail with no additionnal args + cloud.common.turbo_fail: + register: result + failed_when: not result.failed + +- assert: + that: + - result | list | difference(["failed_when_result"]) | sort == ["changed", "failed", "msg"] + +- name: fail with additionnal args + cloud.common.turbo_fail: + params: + test: + phase: integration + release: 1.0 + environment: 
production + register: result + failed_when: not result.failed + +- assert: + that: + - result.environment == "production" + - result.test.phase == "integration" + - result.test.release == 1.0 diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/playbook.yaml b/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/playbook.yaml new file mode 100644 index 00000000..836eb6d9 --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/playbook.yaml @@ -0,0 +1,81 @@ +- hosts: localhost + gather_facts: no + + environment: + ANSIBLE_TURBO_LOOKUP_TTL: 1 + ANOTHER_ANSIBLE_VARS: 10 + + tasks: + - name: variables definition + set_fact: + var00: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + var01: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + + - pause: + seconds: 2 + + - name: variables definition + set_fact: + var10: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + var11: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + environment: + ANSIBLE_TURBO_LOOKUP_TTL: 4 + + - pause: + seconds: 2 + + - name: variables definition + set_fact: + var20: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + var21: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + + - name: validate output + assert: + that: + - '"turbo_demo_counter: 1" in var00' + - '"turbo_demo_counter: 2" in var01' + - '"turbo_demo_counter: 1" in var10' + - '"turbo_demo_counter: 2" in var11' + - '"turbo_demo_counter: 3" in var20' + - '"turbo_demo_counter: 4" in var21' + + - name: Wait for the socket to be closed + pause: + seconds: 5 + + +- hosts: localhost + gather_facts: no + + vars: + turbo_play_var: "simple ansible playbook variable" + + tasks: + - name: set variables using lookup plugin + set_fact: + var0: "{{ lookup('cloud.common.turbo_demo', terms, playbook_vars=['turbo_play_var', 'turbo_task_var'], wantlist=True) }}" + var1: "{{ lookup('cloud.common.turbo_demo', terms, 
playbook_vars=['turbo_task_var'], wantlist=True) }}" + var2: "{{ lookup('cloud.common.turbo_demo', terms, playbook_vars=['turbo_play_var'], wantlist=True) }}" + vars: + terms: ["2.9", "2.10"] + turbo_task_var: "simple ansible task variable" + + - name: test lookup plugin using a module + debug: + msg: "{{ lookup('cloud.common.turbo_demo', wantlist=True) }}" + register: output + + - name: validate other settings + assert: + that: + - '"turbo_demo_counter: 1" in var0' + - '"turbo_demo_counter: 2" in var1' + - '"turbo_demo_counter: 3" in var2' + - '"turbo_demo_counter: 4" in output.msg' + - '"simple ansible task variable" in var0' + - '"simple ansible task variable" in var1' + - 'turbo_play_var in var0' + - 'turbo_play_var in var2' + - '["2.9", "2.10"] in var0' + - '["2.9", "2.10"] in var1' + - '["2.9", "2.10"] in var2' diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/runme.sh b/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/runme.sh new file mode 100755 index 00000000..5e27470e --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_lookup/runme.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +set -eux +exec ansible-playbook playbook.yaml -vvv diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/playbook.yaml b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/playbook.yaml new file mode 100644 index 00000000..fd01f146 --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/playbook.yaml @@ -0,0 +1,5 @@ +- hosts: localhost + gather_facts: no + tasks: + - import_role: + name: turbo_mode diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/runme.sh b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/runme.sh new file mode 100755 index 00000000..4c43575d --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/runme.sh @@ 
-0,0 +1,4 @@ +#!/usr/bin/env bash +set -eux +export ANSIBLE_ROLES_PATH=.. +exec ansible-playbook playbook.yaml diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/tasks/main.yaml b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/tasks/main.yaml new file mode 100644 index 00000000..5be80f6c --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode/tasks/main.yaml @@ -0,0 +1,77 @@ +- cloud.common.turbo_import: + +- cloud.common.turbo_demo: + with_sequence: count=10 + register: _result +- debug: var=_result.results[-1] +- assert: + that: + - _result.results[-1].counter == 10 +- cloud.common.turbo_demo: + with_sequence: count=10 + check_mode: True + register: _result +- assert: + that: + - _result.results[-1].counter == 10 +- cloud.common.turbo_demo: + with_sequence: count=10 + become: true + register: _result +- assert: + that: + - _result.results[-1].counter == 10 +- cloud.common.turbo_demo: + diff: yes + register: _result_with_diff +- assert: + that: + - _result_with_diff.diff is defined +- cloud.common.turbo_demo: + diff: no + register: _result_no_diff +- assert: + that: + - _result_no_diff.diff is undefined + +- name: Test task environment var + cloud.common.turbo_demo: + environment: + TURBO_TEST_VAR: foobar + register: _result + +- assert: + that: + - _result.envvar == "foobar" + +- name: Test task environment var not set + cloud.common.turbo_demo: + register: _result + +- assert: + that: + - not _result.envvar + + +- name: Create temporary dir + ansible.builtin.tempfile: + state: directory + suffix: temp + register: tempdir_1 + +- name: Test with a different remote_tmp, there is no socket yet. + cloud.common.turbo_demo: + vars: + ansible_remote_tmp: "{{ tempdir_1.path }}" + register: _result +- assert: + that: + - _result.counter == 1 + +- name: test using default remote_tmp. 
socket previously created + cloud.common.turbo_demo: + register: _result + +- assert: + that: + - _result.counter > 1 diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/inventory b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/inventory new file mode 100644 index 00000000..45c6d9bb --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/inventory @@ -0,0 +1,50 @@ +host1 ansible_connection=local +host2 ansible_connection=local +host3 ansible_connection=local +host4 ansible_connection=local +host5 ansible_connection=local +host6 ansible_connection=local +host7 ansible_connection=local +host8 ansible_connection=local +host9 ansible_connection=local +host10 ansible_connection=local +host11 ansible_connection=local +host12 ansible_connection=local +host13 ansible_connection=local +host14 ansible_connection=local +host15 ansible_connection=local +host16 ansible_connection=local +host17 ansible_connection=local +host18 ansible_connection=local +host19 ansible_connection=local +host20 ansible_connection=local +host21 ansible_connection=local +host22 ansible_connection=local +host23 ansible_connection=local +host24 ansible_connection=local +host25 ansible_connection=local +host26 ansible_connection=local +host27 ansible_connection=local +host28 ansible_connection=local +host29 ansible_connection=local +host30 ansible_connection=local +host31 ansible_connection=local +host32 ansible_connection=local +host33 ansible_connection=local +host34 ansible_connection=local +host35 ansible_connection=local +host36 ansible_connection=local +host37 ansible_connection=local +host38 ansible_connection=local +host39 ansible_connection=local +host40 ansible_connection=local +host41 ansible_connection=local +host42 ansible_connection=local +host43 ansible_connection=local +host44 ansible_connection=local +host45 ansible_connection=local +host46 
ansible_connection=local +host47 ansible_connection=local +host48 ansible_connection=local +host49 ansible_connection=local +host50 ansible_connection=local diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/playbook.yaml b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/playbook.yaml new file mode 100644 index 00000000..da0fe0f3 --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/playbook.yaml @@ -0,0 +1,33 @@ +- hosts: localhost + gather_facts: no + tasks: + - cloud.common.turbo_demo: + - pause: + seconds: 5 + +- hosts: all + strategy: free + gather_facts: no + tasks: + - cloud.common.turbo_demo: + with_sequence: count=10 + - cloud.common.turbo_demo: + with_sequence: count=10 + +- hosts: localhost + gather_facts: no + tasks: + - cloud.common.turbo_demo: + register: _result + - debug: var=_result + - assert: + that: + - _result.counter == 1002 + - pause: + seconds: 35 + - cloud.common.turbo_demo: + register: _result + - debug: var=_result + - assert: + that: + - _result.counter == 1 diff --git a/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/runme.sh b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/runme.sh new file mode 100755 index 00000000..83a40fcf --- /dev/null +++ b/ansible_collections/cloud/common/tests/integration/targets/turbo_mode_parallel_exec/runme.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -eux +export ANSIBLE_ROLES_PATH=.. 
+exec ansible-playbook -i inventory playbook.yaml diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.10.txt b/ansible_collections/cloud/common/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..4f8d3f12 --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.10.txt @@ -0,0 +1,76 @@ +plugins/module_utils/turbo/exceptions.py compile-2.6!skip +plugins/module_utils/turbo/exceptions.py compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.6!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.6!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.6!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip +plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.6!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.6!skip +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.6!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip 
+plugins/modules/turbo_demo.py metaclass-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py compile-2.6!skip +tests/unit/module_utils/test_turbo_module.py compile-2.7!skip +tests/unit/module_utils/test_turbo_module.py compile-3.5!skip +tests/unit/module_utils/test_turbo_module.py future-import-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.6!skip +plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip +plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.6!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py compile-2.6!skip +plugins/plugin_utils/turbo/lookup.py compile-2.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.5!skip +plugins/plugin_utils/turbo/lookup.py compile-3.6!skip +plugins/plugin_utils/turbo/lookup.py compile-3.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.8!skip +plugins/plugin_utils/turbo/lookup.py future-import-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py metaclass-boilerplate!skip +plugins/lookup/turbo_demo.py compile-2.6!skip +plugins/lookup/turbo_demo.py compile-2.7!skip +plugins/lookup/turbo_demo.py compile-3.5!skip +plugins/lookup/turbo_demo.py future-import-boilerplate!skip +plugins/lookup/turbo_demo.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.6!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.6!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip 
+tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip +plugins/modules/turbo_import.py compile-2.6!skip +plugins/modules/turbo_import.py compile-2.7!skip +plugins/modules/turbo_import.py compile-3.5!skip +plugins/modules/turbo_import.py future-import-boilerplate!skip +plugins/modules/turbo_import.py import-2.6!skip +plugins/modules/turbo_import.py import-2.7!skip +plugins/modules/turbo_import.py import-3.5!skip +plugins/modules/turbo_import.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.11.txt b/ansible_collections/cloud/common/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..3afe6f82 --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.11.txt @@ -0,0 +1,84 @@ +plugins/module_utils/turbo/exceptions.py compile-2.6!skip +plugins/module_utils/turbo/exceptions.py compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.6!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.6!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.6!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip 
+plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.6!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.6!skip +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.6!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip +plugins/modules/turbo_demo.py metaclass-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py compile-2.6!skip +tests/unit/module_utils/test_turbo_module.py compile-2.7!skip +tests/unit/module_utils/test_turbo_module.py compile-3.5!skip +tests/unit/module_utils/test_turbo_module.py future-import-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.6!skip +plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip +plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.6!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py compile-2.6!skip +plugins/plugin_utils/turbo/lookup.py compile-2.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.5!skip +plugins/plugin_utils/turbo/lookup.py compile-3.6!skip +plugins/plugin_utils/turbo/lookup.py compile-3.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.8!skip +plugins/plugin_utils/turbo/lookup.py future-import-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py import-2.6!skip 
+plugins/plugin_utils/turbo/lookup.py import-2.7!skip +plugins/plugin_utils/turbo/lookup.py import-3.5!skip +plugins/plugin_utils/turbo/lookup.py import-3.6!skip +plugins/plugin_utils/turbo/lookup.py import-3.7!skip +plugins/plugin_utils/turbo/lookup.py import-3.8!skip +plugins/plugin_utils/turbo/lookup.py import-3.9!skip +plugins/plugin_utils/turbo/lookup.py metaclass-boilerplate!skip +plugins/lookup/turbo_demo.py compile-2.6!skip +plugins/lookup/turbo_demo.py compile-2.7!skip +plugins/lookup/turbo_demo.py compile-3.5!skip +plugins/lookup/turbo_demo.py import-2.7!skip +plugins/lookup/turbo_demo.py future-import-boilerplate!skip +plugins/lookup/turbo_demo.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.6!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.6!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip +plugins/modules/turbo_import.py compile-2.6!skip +plugins/modules/turbo_import.py compile-2.7!skip +plugins/modules/turbo_import.py compile-3.5!skip +plugins/modules/turbo_import.py future-import-boilerplate!skip +plugins/modules/turbo_import.py import-2.6!skip +plugins/modules/turbo_import.py import-2.7!skip +plugins/modules/turbo_import.py import-3.5!skip +plugins/modules/turbo_import.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.12.txt 
b/ansible_collections/cloud/common/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..cca703bf --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.12.txt @@ -0,0 +1,60 @@ +plugins/module_utils/turbo/exceptions.py compile-2.6!skip +plugins/module_utils/turbo/exceptions.py compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.6!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.6!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.6!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip +plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.6!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.6!skip +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.6!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip +plugins/modules/turbo_demo.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.6!skip 
+plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip +plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.6!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.6!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.6!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip +plugins/modules/turbo_demo.py validate-modules!skip +plugins/modules/turbo_fail.py validate-modules!skip +tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip +plugins/modules/turbo_import.py compile-2.6!skip +plugins/modules/turbo_import.py compile-2.7!skip +plugins/modules/turbo_import.py compile-3.5!skip +plugins/modules/turbo_import.py future-import-boilerplate!skip +plugins/modules/turbo_import.py import-2.6!skip +plugins/modules/turbo_import.py import-2.7!skip +plugins/modules/turbo_import.py import-3.5!skip +plugins/modules/turbo_import.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.13.txt b/ansible_collections/cloud/common/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..fe7e2516 --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.13.txt @@ -0,0 +1,47 @@ +plugins/module_utils/turbo/exceptions.py 
compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip +plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip +plugins/modules/turbo_demo.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip +plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip +plugins/modules/turbo_demo.py 
validate-modules!skip +plugins/modules/turbo_fail.py validate-modules!skip +tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip +plugins/modules/turbo_import.py compile-2.7!skip +plugins/modules/turbo_import.py compile-3.5!skip +plugins/modules/turbo_import.py future-import-boilerplate!skip +plugins/modules/turbo_import.py import-2.7!skip +plugins/modules/turbo_import.py import-3.5!skip +plugins/modules/turbo_import.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.14.txt b/ansible_collections/cloud/common/tests/sanity/ignore-2.14.txt new file mode 100644 index 00000000..b27ed13a --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.14.txt @@ -0,0 +1,41 @@ +plugins/module_utils/turbo/exceptions.py compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip +plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip 
+plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip +plugins/modules/turbo_demo.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip +plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip +plugins/modules/turbo_demo.py validate-modules!skip +plugins/modules/turbo_fail.py validate-modules!skip +tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/sanity/ignore-2.9.txt b/ansible_collections/cloud/common/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..4f8d3f12 --- /dev/null +++ b/ansible_collections/cloud/common/tests/sanity/ignore-2.9.txt @@ -0,0 +1,76 @@ +plugins/module_utils/turbo/exceptions.py compile-2.6!skip +plugins/module_utils/turbo/exceptions.py compile-2.7!skip +plugins/module_utils/turbo/exceptions.py compile-3.5!skip +plugins/module_utils/turbo/exceptions.py 
future-import-boilerplate!skip +plugins/module_utils/turbo/exceptions.py metaclass-boilerplate!skip +plugins/module_utils/turbo/module.py compile-2.6!skip +plugins/module_utils/turbo/module.py compile-2.7!skip +plugins/module_utils/turbo/module.py compile-3.5!skip +plugins/module_utils/turbo/module.py future-import-boilerplate!skip +plugins/module_utils/turbo/module.py import-2.6!skip +plugins/module_utils/turbo/module.py import-2.7!skip +plugins/module_utils/turbo/module.py import-3.5!skip +plugins/module_utils/turbo/module.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py compile-2.6!skip +plugins/module_utils/turbo/server.py compile-2.7!skip +plugins/module_utils/turbo/server.py compile-3.5!skip +plugins/module_utils/turbo/server.py future-import-boilerplate!skip +plugins/module_utils/turbo/server.py import-2.6!skip +plugins/module_utils/turbo/server.py import-2.7!skip +plugins/module_utils/turbo/server.py import-3.5!skip +plugins/module_utils/turbo/server.py metaclass-boilerplate!skip +plugins/module_utils/turbo/server.py pylint:ansible-bad-module-import +plugins/modules/turbo_demo.py compile-2.6!skip +plugins/modules/turbo_demo.py compile-2.7!skip +plugins/modules/turbo_demo.py compile-3.5!skip +plugins/modules/turbo_demo.py future-import-boilerplate!skip +plugins/modules/turbo_demo.py import-2.6!skip +plugins/modules/turbo_demo.py import-2.7!skip +plugins/modules/turbo_demo.py import-3.5!skip +plugins/modules/turbo_demo.py metaclass-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py compile-2.6!skip +tests/unit/module_utils/test_turbo_module.py compile-2.7!skip +tests/unit/module_utils/test_turbo_module.py compile-3.5!skip +tests/unit/module_utils/test_turbo_module.py future-import-boilerplate!skip +tests/unit/module_utils/test_turbo_module.py metaclass-boilerplate!skip +plugins/modules/turbo_fail.py compile-2.6!skip +plugins/modules/turbo_fail.py compile-2.7!skip +plugins/modules/turbo_fail.py compile-3.5!skip 
+plugins/modules/turbo_fail.py future-import-boilerplate!skip +plugins/modules/turbo_fail.py import-2.6!skip +plugins/modules/turbo_fail.py import-2.7!skip +plugins/modules/turbo_fail.py import-3.5!skip +plugins/modules/turbo_fail.py metaclass-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py compile-2.6!skip +plugins/plugin_utils/turbo/lookup.py compile-2.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.5!skip +plugins/plugin_utils/turbo/lookup.py compile-3.6!skip +plugins/plugin_utils/turbo/lookup.py compile-3.7!skip +plugins/plugin_utils/turbo/lookup.py compile-3.8!skip +plugins/plugin_utils/turbo/lookup.py future-import-boilerplate!skip +plugins/plugin_utils/turbo/lookup.py metaclass-boilerplate!skip +plugins/lookup/turbo_demo.py compile-2.6!skip +plugins/lookup/turbo_demo.py compile-2.7!skip +plugins/lookup/turbo_demo.py compile-3.5!skip +plugins/lookup/turbo_demo.py future-import-boilerplate!skip +plugins/lookup/turbo_demo.py metaclass-boilerplate!skip +plugins/module_utils/turbo/common.py compile-2.6!skip +plugins/module_utils/turbo/common.py compile-2.7!skip +plugins/module_utils/turbo/common.py compile-3.5!skip +plugins/module_utils/turbo/common.py future-import-boilerplate!skip +plugins/module_utils/turbo/common.py import-2.6!skip +plugins/module_utils/turbo/common.py import-2.7!skip +plugins/module_utils/turbo/common.py import-3.5!skip +plugins/module_utils/turbo/common.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/conftest.py metaclass-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py future-import-boilerplate!skip +tests/unit/plugins/module_utils/turbo/test_module.py metaclass-boilerplate!skip +plugins/modules/turbo_import.py compile-2.6!skip +plugins/modules/turbo_import.py compile-2.7!skip +plugins/modules/turbo_import.py compile-3.5!skip +plugins/modules/turbo_import.py future-import-boilerplate!skip 
+plugins/modules/turbo_import.py import-2.6!skip +plugins/modules/turbo_import.py import-2.7!skip +plugins/modules/turbo_import.py import-3.5!skip +plugins/modules/turbo_import.py metaclass-boilerplate!skip diff --git a/ansible_collections/cloud/common/tests/unit/module_utils/test_turbo_module.py b/ansible_collections/cloud/common/tests/unit/module_utils/test_turbo_module.py new file mode 100644 index 00000000..00756a0d --- /dev/null +++ b/ansible_collections/cloud/common/tests/unit/module_utils/test_turbo_module.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# py38 only, See: https://github.com/PyCQA/pylint/issues/2976 +from posixpath import dirname +from unittest.mock import Mock, ANY # pylint: disable=syntax-error +import time +import pytest +import socket +import subprocess +import os +import ansible.module_utils.basic +from ansible_collections.cloud.common.plugins.module_utils.turbo.module import ( + get_collection_name_from_path, + expand_argument_specs_aliases, + prepare_args, +) +import ansible_collections.cloud.common.plugins.module_utils.turbo.common as turbo_common + + +@pytest.mark.parametrize( + "my_module_path,my_collection_name", + [ + ( + "/tmp/ansible_vmware.vmware_rest.vcenter_vm_info_payload_548h2lev/ansible_vmware.vmware_rest.vcenter_vm_info_payload.zip/ansible/module_utils", + "vmware.vmware_rest", + ) + ], +) +def test_collection_name(monkeypatch, my_module_path, my_collection_name): + def mocked_func(): + return my_module_path + + monkeypatch.setattr(ansible.module_utils.basic, "get_module_path", mocked_func) + assert get_collection_name_from_path() == my_collection_name + + +def test_start_daemon_from_module(monkeypatch): + mocked_Popen = Mock() + monkeypatch.setattr(subprocess, "Popen", mocked_Popen) + turbo_socket = turbo_common.AnsibleTurboSocket(socket_path="/aa") + assert turbo_socket.start_server() + 
mocked_Popen.assert_called_once_with( + [ + ANY, + "-m", + "ansible_collections.cloud.common.plugins.module_utils.turbo.server", + "--fork", + "--socket-path", + "/aa", + ], + env=ANY, + close_fds=True, + ) + + +def test_start_daemon_from_lookup(monkeypatch): + mocked_Popen = Mock() + monkeypatch.setattr(subprocess, "Popen", mocked_Popen) + turbo_socket = turbo_common.AnsibleTurboSocket( + socket_path="/aa", plugin="lookup", ttl=150 + ) + assert turbo_socket.start_server() + mocked_Popen.assert_called_once_with( + [ + ANY, + os.path.join(os.path.dirname(turbo_common.__file__), "server.py"), + "--fork", + "--socket-path", + "/aa", + "--ttl", + "150", + ], + env=ANY, + close_fds=True, + ) + + +def test_start_daemon_with_no_mock(tmp_path): + my_socket = tmp_path / "socket" + turbo_socket = turbo_common.AnsibleTurboSocket(socket_path=str(my_socket), ttl=1) + assert turbo_socket.start_server() + time.sleep(0.5) + assert my_socket.is_socket() + time.sleep(0.8) + assert not my_socket.exists() + + +def test_connect(monkeypatch): + mocked_socket = Mock() + monkeypatch.setattr(socket, "socket", mocked_socket) + turbo_socket = turbo_common.AnsibleTurboSocket(socket_path="/nowhere") + assert turbo_socket.bind() + mocked_socket.connect_assert_called_once_with("/nowhere") + + +def test_expand_argument_specs_aliases(): + argspec = {"foo": {"type": int, "aliases": ["bar"]}} + assert expand_argument_specs_aliases(argspec) == { + "foo": {"type": int, "aliases": ["bar"]}, + "bar": {"type": int, "aliases": ["bar"]}, + } + + +def test_prepare_args(): + argspec = {"foo": {"type": int}} + params = {"foo": 1} + assert prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {"foo": 1}} + + +def test_prepare_args_ignore_none(): + argspec = {"foo": {"type": int}} + params = {"foo": None} + assert prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {}} + + +def test_prepare_args_subkey_freeform(): + argspec = {"foo": {"type": dict, "default": {}}} + params = {"foo": {"bar": 1}} + assert 
prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {"foo": {"bar": 1}}} + + +def test_prepare_args_subkey_with_default(): + argspec = {"foo": {"bar": {"default": 1}}} + params = {"foo": {"bar": 1}} + assert prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {"foo": {}}} + + +def test_prepare_args_dedup_aliases(): + argspec = {"foo": {"aliases": ["bar"], "type": int}} + params = {"foo": 1, "bar": 1} + assert prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {"foo": 1}} + + +def test_prepare_args_with_aliases(): + argspec = {"foo": {"aliases": ["bar"], "type": int}} + params = {"foo": 1} + assert prepare_args(argspec, params) == {"ANSIBLE_MODULE_ARGS": {"foo": 1}} diff --git a/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/conftest.py b/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/conftest.py new file mode 100644 index 00000000..8f8f44e7 --- /dev/null +++ b/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/conftest.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2021 XLAB Steampunk +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import json + +import pytest + +from ansible.module_utils import basic +from ansible.module_utils.common.text.converters import to_bytes + + +@pytest.fixture +def set_module_args(monkeypatch): + def wrapper(args=None): + module_args = dict(ANSIBLE_MODULE_ARGS=args or {}) + monkeypatch.setattr(basic, "_ANSIBLE_ARGS", to_bytes(json.dumps(module_args))) + + return wrapper diff --git a/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/test_module.py b/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/test_module.py new file mode 100644 index 00000000..61a02e14 --- /dev/null +++ b/ansible_collections/cloud/common/tests/unit/plugins/module_utils/turbo/test_module.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2021 XLAB Steampunk +# GNU General 
Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import sys + +import pytest + +from ansible_collections.cloud.common.plugins.module_utils.turbo.module import ( + AnsibleTurboModule, +) + + +def _patch_globals(monkeypatch): + # Patch sys.argv so that module does not try to spin up the server on + # initialization. The purpose is to make sure AnsibleTurboModule.embedded_in_server + # is set to True. + monkeypatch.setattr(sys, "argv", ["something/that/ends/on/server.py"]) + + # Collection name detection will fail in unit tests, so we patch it here directly + # and bypass the detection process. + monkeypatch.setattr(AnsibleTurboModule, "collection_name", "namespace.name") + + +def test_module_socket_path_remote_tmp_not_set(monkeypatch, set_module_args): + _patch_globals(monkeypatch) + set_module_args() + module = AnsibleTurboModule(argument_spec={}) + + path = module.socket_path() + + # We cannot know what tmp dir python uses, but we do know that it is a full path + # that ends with deterministc suffix. 
+ assert path.startswith("/") + assert path.endswith("/turbo_mode.namespace.name.socket") + + +@pytest.mark.parametrize("tmp_path", ["/tmp", "/tmp/"]) +def test_module_socket_path_from_remote_tmp(monkeypatch, set_module_args, tmp_path): + _patch_globals(monkeypatch) + set_module_args(dict(_ansible_remote_tmp=tmp_path)) + module = AnsibleTurboModule(argument_spec={}) + + assert module.socket_path() == "/tmp/turbo_mode.namespace.name.socket" + + +@pytest.mark.parametrize( + "tmp_path", ["/t/$MY_VAR", "/t/${MY_VAR}", "/t/$MY_VAR/", "/t/${MY_VAR}/"] +) +def test_module_socket_path_env_vars_in_remote_tmp( + monkeypatch, set_module_args, tmp_path +): + _patch_globals(monkeypatch) + set_module_args(dict(_ansible_remote_tmp=tmp_path)) + monkeypatch.setenv("MY_VAR", "my_var_value") + module = AnsibleTurboModule(argument_spec={}) + + assert module.socket_path() == "/t/my_var_value/turbo_mode.namespace.name.socket" diff --git a/ansible_collections/cloud/common/tests/unit/requirements.txt b/ansible_collections/cloud/common/tests/unit/requirements.txt new file mode 100644 index 00000000..2975a6e9 --- /dev/null +++ b/ansible_collections/cloud/common/tests/unit/requirements.txt @@ -0,0 +1,4 @@ +pytest +pytest-xdist +pytest-mock +mock diff --git a/ansible_collections/cloud/common/tox.ini b/ansible_collections/cloud/common/tox.ini new file mode 100644 index 00000000..77919343 --- /dev/null +++ b/ansible_collections/cloud/common/tox.ini @@ -0,0 +1,38 @@ +[tox] +minversion = 1.4.2 +envlist = linters +skipsdist = True + +[testenv] +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +install_command = pip install {opts} {packages} + +[testenv:black] +deps = black +commands = + black {toxinidir}/plugins {toxinidir}/tests/unit/ + +[testenv:linters] +install_command = pip install {opts} {packages} +deps = + black + flake8 +commands = + black -v --check {toxinidir}/plugins {toxinidir}/tests/unit/ + flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests/unit/ + 
+[flake8] +# E123, E125 skipped as they are invalid PEP-8. +show-source = True +ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 +max-line-length = 160 +builtins = _ +exclude = .git,.tox,tests/unit/compat/ + +[testenv:antsibull-changelog] +deps = + ansible-core==2.11.* + antsibull-changelog +commands = + antsibull-changelog {posargs} diff --git a/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml b/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml new file mode 100644 index 00000000..607e7e1a --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.github/dependabot.yml @@ -0,0 +1,8 @@ +# Set update schedule for GitHub Actions +--- +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml new file mode 100644 index 00000000..f2333cff --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/cleanup.yml @@ -0,0 +1,35 @@ +name: Scheduled Cleanup + +on: + schedule: + - cron: '0 2 * * *' # UTC + +jobs: + cleanup: + name: Cleanup possible leftovers + runs-on: ubuntu-latest + steps: + - name: Ensure no other integration test is currently running + uses: softprops/turnstyle@v1 + timeout-minutes: 60 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + same-branch-only: false + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.8' + + - name: Run cleanup + run: | + pip install cloudscale-cli + for resource in objects-user server floating-ip server-group network; + do + echo "cleaning up $resource..."; + cloudscale $resource list --delete --force; + echo "...done"; + done + env: + CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml 
b/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml new file mode 100644 index 00000000..bf7e5e7b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/publish-ansible-galaxy.yml @@ -0,0 +1,33 @@ +name: Publish release on Ansible Galaxy + +on: + release: + types: [published] + +jobs: + deploy: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ansible_collections/cloudscale_ch/cloud + steps: + - uses: actions/checkout@v3 + with: + path: ansible_collections/cloudscale_ch/cloud + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install ansible + + - name: Build and publish + env: + ANSIBLE_GALAXY_API_KEY: ${{ secrets.ANSIBLE_GALAXY_API_KEY }} + run: | + ansible-galaxy collection build . + ansible-galaxy collection publish *.tar.gz --api-key $ANSIBLE_GALAXY_API_KEY diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml new file mode 100644 index 00000000..a8510a39 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-integration.yml @@ -0,0 +1,107 @@ +name: Collection Integration tests + +on: + schedule: + - cron: "0 5 * * *" # UTC + push: + tags: "v*" + branches: + - "test/**" +jobs: + integration-test: + name: Integration test using Python ${{ matrix.python-version }} + runs-on: ubuntu-20.04 + defaults: + run: + working-directory: ansible_collections/cloudscale_ch/cloud + strategy: + max-parallel: 1 + fail-fast: false + matrix: + ansible: + - stable-2.14 + python: + - 3.10 + steps: + - name: Check out code + uses: actions/checkout@v3 + with: + path: ansible_collections/cloudscale_ch/cloud + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} 
+ + - name: Install ansible and collection dependencies + run: | + python -m pip install --upgrade pip + pip install ansible + pip install -r tests/requirements.txt + + - name: Build and install collection + run: | + ansible-galaxy collection build . + ansible-galaxy collection install *.gz + + - name: Add config file + env: + CONFIG_FILE: ${{ secrets.CONFIG_FILE }} + INI_FILE: tests/integration/cloud-config-cloudscale.ini + run: | + echo -n "$CONFIG_FILE" > $INI_FILE && [ -s $INI_FILE ] || (>&2 echo no secrets provided; exit 1) + + - name: Ensure no other integration test is currently running + uses: softprops/turnstyle@v1 + timeout-minutes: 60 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + same-branch-only: false + + - name: Run the tests + run: >- + ansible-test + integration + --docker + -v + --diff + --color + --allow-unsupported + --continue-on-error + --coverage + cloud/cloudscale/ + + - name: Generate coverage report. + run: >- + ansible-test + coverage xml + -v + --requirements + --group-by command + --group-by version + + - uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: false + + - name: Send mail in case of failure + id: send_mail + if: ${{ failure() && github.event_name == 'schedule' }} + shell: python3 {0} + run: | + from smtplib import SMTP + from email.message import EmailMessage + + email = EmailMessage() + email['TO'] = '${{ secrets.CRON_RCPT }}' + email['FROM'] = 'noreply@github.com' + email['Subject'] = 'Ansible Cloud Module Integration Test Failure' + email.set_content(""" + Integration tests using Python ${{ matrix.python-version }} failed: + https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + """) + + with SMTP('${{ secrets.MAILSERVER }}') as smtp: + smtp.starttls() + smtp.send_message(email) diff --git a/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml new file mode 100644 index 
00000000..02f26ff0 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.github/workflows/test-sanity.yml @@ -0,0 +1,39 @@ +name: Sanity tests +on: + schedule: + - cron: "32 5 * * *" + pull_request: + +jobs: + sanity: + name: Sanity tests (${{ matrix.ansible }}) + defaults: + run: + working-directory: ansible_collections/cloudscale_ch/cloud + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - stable-2.14 + - devel + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v3 + with: + path: ansible_collections/cloudscale_ch/cloud + + - name: Set up Python 3 + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color diff --git a/ansible_collections/cloudscale_ch/cloud/.gitignore b/ansible_collections/cloudscale_ch/cloud/.gitignore new file mode 100644 index 00000000..dadcc76e --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/.gitignore @@ -0,0 +1,4 @@ +*.pyc +*.tar.gz +tests/integration/cloud-config-cloudscale.ini +tests/output/ diff --git a/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst b/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst new file mode 100644 index 00000000..23f031cc --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/CHANGELOG.rst @@ -0,0 +1,135 @@ +============================================== +Ansible Collection cloudscale.ch Release Notes +============================================== + +.. contents:: Topics + + +v2.2.4 +====== + +Minor Changes +------------- + +- Add UEFI firmware type option for custom images. + +v2.2.3 +====== + +Minor Changes +------------- + +- Fixed a typo in region code. +- Fixed various documentation typos. 
+- Streamlined the flavors to the new format ``flex-y-x`` across the related modules and tests. + +v2.2.2 +====== + +Minor Changes +------------- + +- Fixed inventory documentation. + +v2.2.1 +====== + +Minor Changes +------------- + +- Updated documentation: ``ssh_keys`` is a YAML list, not a string. + +v2.2.0 +====== + +Major Changes +------------- + +- Add custom_image module + +Minor Changes +------------- + +- Increase api_timeout to 45 +- Read CLOUDSCALE_API_TIMEOUT environment variable + +New Modules +----------- + +- custom_image - Manage custom images on the cloudscale.ch IaaS service + +v2.1.0 +====== + +Minor Changes +------------- + +- Add interface parameter to server module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54). +- Rename server_uuids parameter to servers in volume module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54). + +Deprecated Features +------------------- + +- The aliases ``server_uuids`` and ``server_uuid`` of the servers parameter in the volume module will be removed in version 3.0.0. + +v2.0.0 +====== + +Breaking Changes / Porting Guide +-------------------------------- + +- floating_ip - ``name`` is required for assigning a new floating IP. + +v1.3.1 +====== + +Minor Changes +------------- + +- Implemented identical naming support of the same resource type per zone (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/46). + +Bugfixes +-------- + +- Fix inventory plugin failing to launch (https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues/49). + +v1.3.0 +====== + +Minor Changes +------------- + +- floating_ip - Added an optional name parameter to gain idempotency. The parameter will be required for assigning a new floating IP with release of version 2.0.0 (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/43/). 
+- floating_ip - Allow to reserve an IP without assignment to a server (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/31/). + +New Modules +----------- + +- subnet - Manages subnets on the cloudscale.ch IaaS service + +v1.2.0 +====== + +Minor Changes +------------- + +- server_group - The module has been refactored and the code simplifed (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/23). +- volume - The module has been refactored and the code simplifed (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/24). + +New Modules +----------- + +- network - Manages networks on the cloudscale.ch IaaS service + +v1.1.0 +====== + +Minor Changes +------------- + +- floating_ip - added tags support (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/16) + +New Modules +----------- + +- objects_user - Manages objects users on the cloudscale.ch IaaS service diff --git a/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md b/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md new file mode 100644 index 00000000..1b2b6da2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/CONTRIBUTING.md @@ -0,0 +1,6 @@ +# Contributing + +Any contribution is welcome and we only ask contributors to: + +- Create an issue for any significant contribution that would change a large portion of the code base +- Provide at least integration tests for any contribution diff --git a/ansible_collections/cloudscale_ch/cloud/COPYING b/ansible_collections/cloudscale_ch/cloud/COPYING new file mode 100644 index 00000000..94a04532 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/COPYING @@ -0,0 +1,621 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS diff --git a/ansible_collections/cloudscale_ch/cloud/FILES.json b/ansible_collections/cloudscale_ch/cloud/FILES.json new file mode 100644 index 00000000..d6a6d654 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/FILES.json @@ -0,0 +1,943 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c61f12da7cdad526bdcbed47a4c0a603e60dbbfdaf8b66933cd088e9132c303f", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d4aa05cbe9026ed27d332e8c9609149790b925290aa4c59594b2b687f30c3ac", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/test-sanity.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a188ed15150521c535f5e1284e783c9ed3b352c7cc388f19b2720f9397b1910d", + "format": 1 + }, + { + "name": ".github/workflows/test-integration.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9f92c8e02bc65c4bd3ec14cb789ea9bb0f6c89a41111fcfa5359179559d0836", + "format": 1 + }, + { + "name": ".github/workflows/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d176e1f384ac6ca05fc5b29640bc9b0866410121e9efff2a26c42cdc491e5c49", + "format": 1 + }, + { + "name": ".github/workflows/publish-ansible-galaxy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7900c906da270b42918a7684f37093356fa49989061b113d956ccd5d9a1e79c", + "format": 1 + }, + { + "name": ".github/dependabot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d207e80d10726360f2046d4b2473a3cfd9f9eca99590281fa39d88f78e745145", + "format": 
1 + }, + { + "name": "codecov.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af6b28485318d71eee003780eb62183364d82f1a50917cdcaaa4ffb88a178d51", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/inventory/inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e44f742653062165562bfdded791e6047a267b05ea8fbba2f58ae9305effc5e", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/custom_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfa944ceb81f894683a532c07691751a485709c4b2caf9515ba55255b8c66b4d", + "format": 1 + }, + { + "name": "plugins/modules/subnet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b2cb880ae29727265abf4a3908ed1fd4b70f172e3a04ee94940de3e6ab9d800", + "format": 1 + }, + { + "name": "plugins/modules/floating_ip.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eba753b038fe3fc425a97f312ae64c9c4031f61123977e5512c07820a842a199", + "format": 1 + }, + { + "name": "plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules/server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2464ae35c9994f2bcc9e2c19ca379cbfb11a9305316da845db8e7a1a635e30e", + "format": 1 + }, + { + "name": "plugins/modules/volume.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "735f7cb448f3f8bd5788d0b9db3bfe71b9f6351fb0b3ef1295b84adc15f70856", + "format": 1 + }, + { + "name": "plugins/modules/server_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8942f6d76b4f4957eb0e60b263184e33e09b88270770637cc1527029c44c3f7e", + "format": 1 + }, + { + "name": "plugins/modules/network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee0b344c580fbb0816486bff7a061cee8e110cbf5dae22f22e4a9a092ebd0eee", + "format": 1 + }, + { + "name": "plugins/modules/objects_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80b229b178c43aa1a62bcf2cad4b76486f059fc87a206a02a6cc613b953cfb8d", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/doc_fragments/api_parameters.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0e3046cf350435d812fd0ba4f40afa020bc4229dfc973461b524d0e06591072", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a70c4fbf0f665358230aa1384016e65a3d292cfb428901360bbeb1c07420a8e", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4da3851108cf118d70fb2d8d5bdd4052043ab824fd34d83932212ff433c44731", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "281f8e67542260c31e3da2659c50236ce74c2c727487179bc751fb568c49e56e", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e84977cc15a4da5d8fff95c7e262d4370a7955889c67a8f040d0e8b31c165aae", + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6c817a7f8c0522150e2f8ffd9af76a78fc35026b9e0705bde9c4c64523c55e5", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "932ab1d19eba7c0d158a6d527f333eeaeab0f161fb5812f5347af1fdea100996", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a5a2e4665194900a0d24968456298eaac531c0b10a3fac4f10be5c58047c0e1", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 
+ }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d2d9b8da573fa0339395327d86dc979404d749f3e5c3e15110d46644dde4924", + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c349cbac5f1f476a19ca2a347f811fb5d74755358fb4e86249040e8aeba9f17b", + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/custom_image/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6766719eabd380f4239027c1da1663c6bca162716dcba27b82aa015c126765cc", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/subnet/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddcaa01aabe22df954c1c352a81f8456de8476be5e8aa3ec229e4a0698b19ec", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d8107e1bdb7deab792785788e8745b42588f71783cafcc2ce1a32d35c1c27ef", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60128cb6423e9f2b21ba9e05cc670325b99d3b6fbbb807d3d4474e100c7c377a", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/tasks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d52e7e9120b1accdd689734b41e8bc34ecd81f74775c3a800f35d36c07ca63d", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/subnet/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c6f0d3942e38ede79365f096d78892a420172d300f5fea0f79f8f36767d06d0", + "format": 1 + }, + { + "name": "tests/integration/targets/server_group", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27d4b996ea78fddc8152d8dc5f2a87461ec8972baecb9a494b384c0f163b9bc3", + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f41b4068697edfbb80ffa3e1e171b82e016a72b17efc299c77283fe88f774124", + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24758d9aae6a825817c2bdfe3314a557264ef5c640ae784b4e26fd07d2e677dc", + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/common", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/common/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/common/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f4342705d85638595cc3dd584302e9c860bd83d383653c3d9fe283ee98fcf19", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_networks.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76183d1bd899652a5c8deea41b0f27ad64cd268ce3e7a8a69314c7193c677be8", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_server_groups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d92489602b40f14f4f3156a04b2a39841f01a3296b837365fd9887e25b84e3b8", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_objects_users.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52891380666cf92aeb5d8797cc3e5b0567772e2641714a775a05ccb2279d2a98", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_floating_ips.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bab4ca163d566a13f9abacdf179f59fc186ca160c777937fc7eb76616993c7f3", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb224f8cf56d2a24e81c14bf3b8974577d1d9aab78cb8c100fdc552055113c9c", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_servers.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58ea8194da127df1082eb88523f6b713d3d3e323a62279ed52a1e0e96efc6bf7", + "format": 1 + }, + { + "name": "tests/integration/targets/common/tasks/cleanup_custom_images.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9def0b75c5ff882cfd15504cbcaea755124887b5cfbc3670551c48e8f2f8524", + "format": 1 + }, + { + "name": "tests/integration/targets/common/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/common/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63dd078899b099fdd988defcad94a7514f83827057caefc2b4cb9dd31188ce32", + "format": 1 + }, + { + "name": "tests/integration/targets/network", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/network/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/network/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/network/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be793a4af2cbb5f5398e0ba3fae822b4a6f76be183f69163f679cf6ca888ff3f", + "format": 1 + }, + { + "name": "tests/integration/targets/network/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da1e8e05fe4586478da692ee2f38b07b8691dafeea6d46f770f58b51260df7b0", + "format": 1 + }, + { + "name": "tests/integration/targets/network/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8e9848f5767e67ce778eca1bdd5044403bccd43586143644d5a6e678f826fb", + "format": 1 + }, + { + "name": "tests/integration/targets/network/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/network/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46e001acebb586b961302c5e8876eab0fe2a2c136b7f026da04014df64b30acb", + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e3142fac71c2169fa650af4216cf5d69eef1d80bfe020fbe74549d997d8e1c3", + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f522a4d67cfc9d074d2e2e9f1a6d0aa274e87712fddb00905ea9262f39f472f3", + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/objects_user/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/volume", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/volume/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3007eefc5cb739910601691a7f66c4f9c571f1e32b033346563fd5499cd83e8c", + "format": 1 + }, + { + "name": 
"tests/integration/targets/volume/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "508756b3b7948faef7ec9aa96a8449da255578b6b657523c442180879b49489c", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks/deprecation_warning.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "639841041872b51e9e1da5815bf4b61de540b48e9a6920436f5d5a315fb0a4a7", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2626113d52c4f1113b41e75af461ad16f779271aee0b01c7600f4f3b8c753a50", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da1f31d57f5785e0866673baae9e2f1e1672efd5c423c5da541176cb7953c116", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/tasks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f778d0044cd830b629812b1ca8815b6eeee99f562741af269f6de00e6469980", + "format": 1 + }, + { + "name": "tests/integration/targets/volume/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/volume/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/server/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/server/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3ce1eba741aaee12d2aa6fa734e0584f58388f4988413d2d976513180ac1c72", + "format": 1 + }, + { + "name": "tests/integration/targets/server/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5c43334636f75ec79693cbbdb680061d3a426ae3944d5d84ccb9f93f43886b6", + "format": 1 + }, + { + "name": "tests/integration/targets/server/tasks/failures.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39075af57f8eba3ce7f50067dc89259ab4e5391411ebccde06383f54307f8800", + "format": 1 + }, + { + "name": "tests/integration/targets/server/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/targets/server/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/server/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfc6c77534214c87a0a023d21b6dff41891f3ab439dc934484adfde7d1def736", + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14732eb4e328df90778a7512eaba85e7ba941032a1cfb85c81a915eba15eaa4a", + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"31705d25f3bf944957153d3c930086f5babcf873958adc76da981ecaf5bfb579", + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/tasks/unassigned.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd49224daf8665d9980a0041ac445c0402da2718fee85842cd7bf43369c9cfc0", + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/tasks/floating_ip.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "402b982a02df802093aee7979fcefc37506a388805b0c791e866737726e165db", + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/floating_ip/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad8168157b59ea211367da3adec3c025885b88e940a43fbdd8c5b9a8f06a791", + "format": 1 + }, + { + "name": "tests/integration/cloud-config-cloudscale.ini.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b442f74e11fdcdc779e16b3bf0a9f359b04b13b7d5c17175ff12d4b93eeb0b63", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9fbfcfa2dfc20ea57c6a825ee11ce751a5cd632cfded14b32ced3ad79ef34a8", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/cloudscale_ch/cloud/MANIFEST.json b/ansible_collections/cloudscale_ch/cloud/MANIFEST.json new file mode 100644 index 00000000..f01bf414 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/MANIFEST.json @@ -0,0 +1,36 @@ +{ + "collection_info": { + "namespace": "cloudscale_ch", + "name": "cloud", + "version": "2.2.4", + "authors": [ + "Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>", + "Denis Krienb\u00fchl <denis.krienbuehl@cloudscale.ch>", + "Ren\u00e9 Moser <mail@renemoser.net>" + ], + "readme": "README.md", + "tags": [ + "cloud", + "cloudscale", + "cloudscale_ch" + ], + "description": "Ansible Collection for cloudscale.ch", + "license": [ + "GPL-3.0-or-later" + ], + "license_file": null, + "dependencies": {}, + "repository": "https://github.com/cloudscale-ch/ansible-collection-cloudscale", + "documentation": "", + "homepage": "https://github.com/cloudscale-ch/ansible-collection-cloudscale", + "issues": "https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1c13b1ba3bb60efc7ba7c373c914a9e125263bef719ff809a4b283e2b4c26ed", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/cloudscale_ch/cloud/README.md b/ansible_collections/cloudscale_ch/cloud/README.md new file mode 100644 index 00000000..7842aa0d --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/README.md @@ -0,0 +1,133 @@ + +![Collection Integration tests](https://github.com/cloudscale-ch/ansible-collection-cloudscale/workflows/Collection%20Integration%20tests/badge.svg) +[![Codecov](https://img.shields.io/codecov/c/github/cloudscale-ch/ansible-collection-cloudscale)](https://codecov.io/gh/cloudscale-ch/ansible-collection-cloudscale) +[![License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](LICENSE) + +# Ansible Collection for cloudscale.ch Cloud + +This collection provides a series of Ansible modules and plugins for interacting with the [cloudscale.ch](https://www.cloudscale.ch) Cloud. + +## Requirements + +- ansible version >= 2.9 + +## Installation + +To install the collection hosted in Galaxy: + +```bash +ansible-galaxy collection install cloudscale_ch.cloud +``` + +To upgrade to the latest version of the collection: + +```bash +ansible-galaxy collection install cloudscale_ch.cloud --force +``` + +## Usage + +### Playbooks + +To use a module from the cloudscale.ch collection, please reference the full namespace, collection name, and modules name that you want to use: + +```yaml +--- +- name: Using cloudscale.ch collection + hosts: localhost + tasks: + - cloudscale_ch.cloud.server: + name: web1 + image: debian-10 + flavor: flex-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + server_groups: web-group + zone: lpg1 + api_token: ... 
+``` + +Or you can add the full namespace and collection name in the `collections` element: + +```yaml +--- +- name: Using cloudscale.ch collection + hosts: localhost + collections: + - cloudscale_ch.cloud + tasks: + - server: + name: web1 + image: debian-10 + flavor: flex-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + server_groups: web-group + zone: lpg1 + api_token: ... +``` + +### Roles + +For existing Ansible roles, please also reference the full namespace, collection name, and modules name that are used in tasks instead of just modules name. + +### Plugins + +To use a plugin, please reference the full namespace, collection name, and plugins name that you want to use: + +```yaml +plugin: cloudscale_ch.cloud.<myplugin> +``` + +## Contributing + +There are many ways in which you can participate in the project, for example: + +- Submit bugs and feature requests, and help us verify them as they are checked in +- Review source code changes +- Review the documentation and make pull requests for anything from typos to new content +- If you are interested in fixing issues and contributing directly to the code base, please see the [CONTRIBUTING](CONTRIBUTING.md) document. + +## Releasing + +### Prepare a new release + +The changelog is managed using the `antsibull` tool. You can install +it using `pip install antsibull` + +1. Update version in galaxy.yml +2. Update changelog using antsibull +``` +antsibull-changelog release +``` +3. Commit changelog and new version +``` +git commit -m "Release version X.Y.Z" galaxy.yml CHANGELOG.rst changelogs/ +``` +4. Tag the release. Preferably create a GPG signed tag if you have a GPG +key. Version tags should be prefixed with "v" (otherwise the +integration tests won't run automatically). +``` +git tag -s -m "Version X.Y.Z" vX.Y.Z +``` +5. 
Push the release and tag +``` +git push origin master vX.Y.Z +``` + +### Release to Ansible Galaxy + +After the release is tagged and pushed to Github a release to Ansible +Galaxy can be created using the release feature in Github: + +1. **Wait for integration tests to succeed. They should automatically +run on new tags.** Only release if they succeed. Otherwise delete the +tag and fix the issue. +2. Create a release on Github by going to the release overview and + selecting "Draft a new release". + +## License + +GNU General Public License v3.0 + +See [COPYING](COPYING) to see the full text. diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore b/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore new file mode 100644 index 00000000..6be6b533 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/changelogs/.gitignore @@ -0,0 +1 @@ +/.plugin-cache.yaml diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml b/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml new file mode 100644 index 00000000..baa66363 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/changelogs/changelog.yaml @@ -0,0 +1,116 @@ +ancestor: null +releases: + 1.1.0: + changes: + minor_changes: + - floating_ip - added tags support (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/16) + fragments: + - 16-floating_ip_tags.yml + modules: + - description: Manages objects users on the cloudscale.ch IaaS service + name: objects_user + namespace: '' + release_date: '2020-08-18' + 1.2.0: + changes: + minor_changes: + - server_group - The module has been refactored and the code simplifed (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/23). + - volume - The module has been refactored and the code simplifed (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/24). 
+ fragments: + - consolidate-modules.yml + modules: + - description: Manages networks on the cloudscale.ch IaaS service + name: network + namespace: '' + release_date: '2020-10-13' + 1.3.0: + changes: + minor_changes: + - floating_ip - Added an optional name parameter to gain idempotency. The parameter + will be required for assigning a new floating IP with release of version 2.0.0 + (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/43/). + - floating_ip - Allow to reserve an IP without assignment to a server (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/31/). + fragments: + - floating-idempotency.yml + - floating-ip_optional_server.yml + modules: + - description: Manages subnets on the cloudscale.ch IaaS service + name: subnet + namespace: '' + release_date: '2020-11-23' + 1.3.1: + changes: + bugfixes: + - Fix inventory plugin failing to launch (https://github.com/cloudscale-ch/ansible-collection-cloudscale/issues/49). + minor_changes: + - Implemented identical naming support of the same resource type per zone (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/46). + fragments: + - allow-similar-name-per-zone.yml + - fix-inventory-plugin-error.yml + release_date: '2021-01-26' + 2.0.0: + changes: + breaking_changes: + - floating_ip - ``name`` is required for assigning a new floating IP. + fragments: + - floating-ip-require-name.yml + release_date: '2021-02-02' + 2.1.0: + changes: + deprecated_features: + - The aliases ``server_uuids`` and ``server_uuid`` of the servers parameter + in the volume module will be removed in version 3.0.0. + minor_changes: + - Add interface parameter to server module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54). + - Rename server_uuids parameter to servers in volume module (https://github.com/cloudscale-ch/ansible-collection-cloudscale/pull/54). 
+ fragments: + - add-interface-parameter-to-server-module.yml + - deprecate-server_uuids-volume-module.yml + - rename-server_uuids-parameter-to-servers.yml + release_date: '2021-02-04' + 2.2.0: + changes: + major_changes: + - Add custom_image module + minor_changes: + - Increase api_timeout to 45 + - Read CLOUDSCALE_API_TIMEOUT environment variable + fragments: + - add-custom-image-module.yml + - increase-api-timeout-and-use-env.yml + modules: + - description: Manage custom images on the cloudscale.ch IaaS service + name: custom_image + namespace: '' + release_date: '2021-05-25' + 2.2.1: + changes: + minor_changes: + - 'Updated documentation: ``ssh_keys`` is a YAML list, not a string.' + release_date: '2022-03-21' + 2.2.2: + changes: + minor_changes: + - Fixed inventory documentation. + fragments: + - fix-sanity.yml + release_date: '2022-05-21' + 2.2.3: + changes: + minor_changes: + - Fixed a typo in region code. + - Fixed various documentation typos. + - Streamlined the flavors to the new format ``flex-y-x`` across the related + modules and tests. + fragments: + - 76-region-typos.yml + - 78-various-typos.yml + - 79-flavor-format.yml + release_date: '2022-11-16' + 2.2.4: + changes: + minor_changes: + - Add UEFI firmware type option for custom images. 
+ fragments: + - add-uefi-option.yml + release_date: '2023-01-04' diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml b/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml new file mode 100644 index 00000000..151cdf32 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Ansible Collection cloudscale.ch +trivial_section_name: trivial diff --git a/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep b/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/changelogs/fragments/.keep diff --git a/ansible_collections/cloudscale_ch/cloud/codecov.yml b/ansible_collections/cloudscale_ch/cloud/codecov.yml new file mode 100644 index 00000000..47fe4ef2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/codecov.yml @@ -0,0 +1,8 @@ +--- +coverage: + precision: 2 + round: down + range: "70...100" + +fixes: + - "/ansible_collections/cloudscale_ch/cloud/::" diff --git a/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml b/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml new file mode 100644 index 00000000..5751eb86 --- /dev/null +++ 
b/ansible_collections/cloudscale_ch/cloud/meta/runtime.yml @@ -0,0 +1,51 @@ +requires_ansible: '>=2.9.10' +action_groups: + cloudscale: + - floating_ip + - network + - objects_user + - server_group + - server + - subnet + - volume + +plugin_routing: + inventory: + cloudscale: + deprecation: + removal_date: 2021-12-12 + warning_text: Deprecated, use cloudscale_ch.cloud.inventory + redirect: cloudscale_ch.cloud.inventory + doc_fragments: + cloudscale: + deprecation: + removal_date: 2021-12-12 + warning_text: Deprecated, use cloudscale_ch.cloud.api_parameters + redirect: cloudscale_ch.cloud.api_parameters + module_utils: + cloudscale: + deprecation: + removal_date: 2021-12-12 + warning_text: Deprecated, use cloudscale_ch.cloud.api + redirect: cloudscale_ch.cloud.api + modules: + cloudscale_floating_ip: + deprecation: + removal_date: 2021-12-12 + warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.floating_ip + redirect: cloudscale_ch.cloud.floating_ip + cloudscale_server: + deprecation: + removal_date: 2021-12-12 + warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.server + redirect: cloudscale_ch.cloud.server + cloudscale_server_group: + deprecation: + removal_date: 2021-12-12 + warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.server_group + redirect: cloudscale_ch.cloud.server_group + cloudscale_volume: + deprecation: + removal_date: 2021-12-12 + warning_text: cloudscale_ prefixed module names are deprecated, use cloudscale_ch.cloud.volume + redirect: cloudscale_ch.cloud.volume diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/__init__.py diff --git 
a/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py new file mode 100644 index 00000000..9ed63eed --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/doc_fragments/api_parameters.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = ''' +options: + api_url: + description: + - cloudscale.ch API URL. + - This can also be passed in the C(CLOUDSCALE_API_URL) environment variable. + default: https://api.cloudscale.ch/v1 + type: str + version_added: 1.3.0 + api_token: + description: + - cloudscale.ch API token. + - This can also be passed in the C(CLOUDSCALE_API_TOKEN) environment variable. + required: true + type: str + api_timeout: + description: + - Timeout in seconds for calls to the cloudscale.ch API. + - This can also be passed in the C(CLOUDSCALE_API_TIMEOUT) environment variable. + default: 45 + type: int +notes: + - All operations are performed using the cloudscale.ch public API v1. + - "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)." + - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at + U(https://control.cloudscale.ch). 
+''' diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/__init__.py diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py new file mode 100644 index 00000000..b55b1900 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/inventory/inventory.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +name: inventory +author: + - Gaudenz Steinlin (@gaudenz) +short_description: cloudscale.ch inventory source +description: + - Get inventory hosts from cloudscale.ch API + - Uses an YAML configuration file ending with either I(cloudscale.yml) or I(cloudscale.yaml) to set parameter values (also see examples). +extends_documentation_fragment: + - constructed +options: + api_token: + description: + - cloudscale.ch API token. + - This can also be passed in the C(CLOUDSCALE_API_TOKEN) environment variable. + type: str + plugin: + description: | + Token that ensures this is a source file for the 'cloudscale' + plugin. + required: True + choices: ['cloudscale'] + inventory_hostname: + description: | + What to register as the inventory hostname. + If set to 'uuid' the uuid of the server will be used and a + group will be created for the server name. + If set to 'name' the name of the server will be used unless + there are more than one server with the same name in which + case the 'uuid' logic will be used. 
+ type: str + choices: + - name + - uuid + default: "name" + ansible_host: + description: | + Which IP address to register as the ansible_host. If the + requested value does not exist or this is set to 'none', no + ansible_host will be set. + type: str + choices: + - public_v4 + - public_v6 + - private + - none + default: public_v4 +''' + +EXAMPLES = r''' +# cloudscale.yml name ending file in YAML format +# Example command line: ansible-inventory --list -i inventory_cloudscale.yml + +plugin: cloudscale_ch.cloud.inventory + +# Example grouping by tag key "project" +plugin: cloudscale_ch.cloud.inventory +keyed_groups: + - prefix: project + key: cloudscale.tags.project + +# Example grouping by key "operating_system" lowercased and prefixed with "os" +plugin: cloudscale_ch.cloud.inventory +keyed_groups: + - prefix: os + key: cloudscale.image.operating_system | lower +''' +import os + +from collections import defaultdict +from json import loads + +from ansible.errors import AnsibleError +from ansible.module_utils.urls import open_url +from ansible.inventory.group import to_safe_group_name +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable + +iface_type_map = { + 'public_v4': ('public', 4), + 'public_v6': ('public', 6), + 'private': ('private', 4), + 'none': (None, None), +} + + +class InventoryModule(BaseInventoryPlugin, Constructable): + + NAME = 'cloudscale' + + @property + def api_url(self): + return os.environ.get( + 'CLOUDSCALE_API_URL', 'https://api.cloudscale.ch/v1') + + @property + def api_token(self): + return self.get_option('api_token') \ + or os.environ.get('CLOUDSCALE_API_TOKEN') + + def _get_server_list(self): + + # Get list of servers from cloudscale.ch API + response = open_url( + self.api_url + '/servers', + headers={'Authorization': 'Bearer %s' % self.api_token} + ) + return loads(response.read()) + + def verify_file(self, path): + ''' + :param path: the path to the inventory config file + :return the contents of the config file 
+ ''' + if super(InventoryModule, self).verify_file(path): + if path.endswith(('cloudscale.yml', 'cloudscale.yaml')): + return True + self.display.debug( + "cloudscale inventory filename must end with 'cloudscale.yml' or 'cloudscale.yaml'" + ) + return False + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + + if not self.api_token: + raise AnsibleError('Could not find an API token. Set the ' + 'CLOUDSCALE_API_TOKEN environment variable.') + + inventory_hostname = self.get_option('inventory_hostname') + if inventory_hostname not in ('name', 'uuid'): + raise AnsibleError('Invalid value for option inventory_hostname: %s' + % inventory_hostname) + + ansible_host = self.get_option('ansible_host') + if ansible_host not in iface_type_map: + raise AnsibleError('Invalid value for option ansible_host: %s' + % ansible_host) + + # Merge servers with the same name + firstpass = defaultdict(list) + for server in self._get_server_list(): + firstpass[server['name']].append(server) + + # Add servers to inventory + for name, servers in firstpass.items(): + if len(servers) == 1 and inventory_hostname == 'name': + self.inventory.add_host(name) + servers[0]['inventory_hostname'] = name + else: + # Two servers with the same name exist, create a group + # with this name and add the servers by UUID + group_name = to_safe_group_name(name) + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + for server in servers: + self.inventory.add_host(server['uuid'], group_name) + server['inventory_hostname'] = server['uuid'] + + # Set variables + iface_type, iface_version = iface_type_map[ansible_host] + for server in servers: + hostname = server.pop('inventory_hostname') + if ansible_host != 'none': + addresses = [address['address'] + for interface in server['interfaces'] + for address in interface['addresses'] + if interface['type'] == iface_type + and 
address['version'] == iface_version] + + if len(addresses) > 0: + self.inventory.set_variable( + hostname, + 'ansible_host', + addresses[0], + ) + self.inventory.set_variable( + hostname, + 'cloudscale', + server, + ) + + variables = self.inventory.hosts[hostname].get_vars() + # Set composed variables + self._set_composite_vars( + self.get_option('compose'), + variables, + hostname, + self.get_option('strict'), + ) + + # Add host to composed groups + self._add_host_to_composed_groups( + self.get_option('groups'), + variables, + hostname, + self.get_option('strict'), + ) + + # Add host to keyed groups + self._add_host_to_keyed_groups( + self.get_option('keyed_groups'), + variables, + hostname, + self.get_option('strict'), + ) diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py b/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py new file mode 100644 index 00000000..6c8ec4b5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/module_utils/api.py @@ -0,0 +1,360 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from copy import deepcopy +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + + +def cloudscale_argument_spec(): + return dict( + api_url=dict( + type='str', + fallback=(env_fallback, ['CLOUDSCALE_API_URL']), + default='https://api.cloudscale.ch/v1', + ), + api_token=dict( + type='str', + fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']), + no_log=True, + required=True, + ), + api_timeout=dict( + type='int', + fallback=(env_fallback, ['CLOUDSCALE_API_TIMEOUT']), + default=45, + ), + ) + + +class AnsibleCloudscaleApi(object): + + def __init__(self, 
module): + self._module = module + + self._api_url = module.params['api_url'] + if not self._api_url.endswith('/'): + self._api_url = self._api_url + '/' + + self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']} + + def _get(self, api_call): + resp, info = fetch_url(self._module, self._api_url + api_call, + headers=self._auth_header, + timeout=self._module.params['api_timeout']) + + if info['status'] == 200: + return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict')) + elif info['status'] == 404: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for ' + '"%s".' % api_call, fetch_url_info=info) + + def _post_or_patch(self, api_call, method, data, filter_none=True): + # This helps with tags when we have the full API resource href to update. + if self._api_url not in api_call: + api_endpoint = self._api_url + api_call + else: + api_endpoint = api_call + + headers = self._auth_header.copy() + if data is not None: + # Sanitize data dictionary + # Deepcopy: Duplicate the data object for iteration, because + # iterating an object and changing it at the same time is insecure + for k, v in deepcopy(data).items(): + if filter_none and v is None: + del data[k] + + data = self._module.jsonify(data) + headers['Content-type'] = 'application/json' + + resp, info = fetch_url(self._module, + api_endpoint, + headers=headers, + method=method, + data=data, + timeout=self._module.params['api_timeout']) + + if info['status'] in (200, 201): + return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict')) + elif info['status'] == 204: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for ' + '"%s".' 
% (method, api_endpoint), fetch_url_info=info) + + def _post(self, api_call, data=None): + return self._post_or_patch(api_call, 'POST', data) + + def _patch(self, api_call, data=None, filter_none=True): + return self._post_or_patch(api_call, 'PATCH', data, filter_none) + + def _delete(self, api_call): + # api_call might be full href already + if self._api_url not in api_call: + api_endpoint = self._api_url + api_call + else: + api_endpoint = api_call + + resp, info = fetch_url(self._module, + api_endpoint, + headers=self._auth_header, + method='DELETE', + timeout=self._module.params['api_timeout']) + + if info['status'] == 204: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for ' + '"%s".' % api_endpoint, fetch_url_info=info) + + +class AnsibleCloudscaleBase(AnsibleCloudscaleApi): + + def __init__( + self, + module, + resource_name='', + resource_key_uuid='uuid', + resource_key_name='name', + resource_create_param_keys=None, + resource_update_param_keys=None, + ): + super(AnsibleCloudscaleBase, self).__init__(module) + self._result = { + 'changed': False, + 'diff': dict( + before=dict(), + after=dict() + ), + } + self._resource_data = dict() + + # The identifier key of the resource, usually 'uuid' + self.resource_key_uuid = resource_key_uuid + + # The name key of the resource, usually 'name' + self.resource_key_name = resource_key_name + + # The API resource e.g server-group + self.resource_name = resource_name + + # List of params used to create the resource + self.resource_create_param_keys = resource_create_param_keys or ['name'] + + # List of params used to update the resource + self.resource_update_param_keys = resource_update_param_keys or ['name'] + + # Resource has no name field but tags, we use a defined tag as name + self.use_tag_for_name = False + self.resource_name_tag = "ansible_name" + + # Constraint Keys to match when query by name + self.query_constraint_keys = [] + + def 
pre_transform(self, resource): + return resource + + def init_resource(self): + return { + 'state': "absent", + self.resource_key_uuid: self._module.params.get(self.resource_key_uuid) or self._resource_data.get(self.resource_key_uuid), + self.resource_key_name: self._module.params.get(self.resource_key_name) or self._resource_data.get(self.resource_key_name), + } + + def query(self): + # Initialize + self._resource_data = self.init_resource() + + # Query by UUID + uuid = self._module.params[self.resource_key_uuid] + if uuid is not None: + + # network id case + if "/" in uuid: + uuid = uuid.split("/")[0] + + resource = self._get('%s/%s' % (self.resource_name, uuid)) + if resource: + self._resource_data = resource + self._resource_data['state'] = "present" + + # Query by name + else: + name = self._module.params[self.resource_key_name] + + # Resource has no name field, we use a defined tag as name + if self.use_tag_for_name: + resources = self._get('%s?tag:%s=%s' % (self.resource_name, self.resource_name_tag, name)) + else: + resources = self._get('%s' % self.resource_name) + + matching = [] + for resource in resources: + if self.use_tag_for_name: + resource[self.resource_key_name] = resource['tags'].get(self.resource_name_tag) + + # Skip resource if constraints is not given e.g. in case of floating_ip the ip_version differs + for constraint_key in self.query_constraint_keys: + if self._module.params[constraint_key] is not None: + if constraint_key == 'zone': + resource_value = resource['zone']['slug'] + else: + resource_value = resource[constraint_key] + + if resource_value != self._module.params[constraint_key]: + break + else: + if resource[self.resource_key_name] == name: + matching.append(resource) + + # Fail on more than one resource with identical name + if len(matching) > 1: + self._module.fail_json( + msg="More than one %s resource with '%s' exists: %s. " + "Use the '%s' parameter to identify the resource." 
% ( + self.resource_name, + self.resource_key_name, + name, + self.resource_key_uuid + ) + ) + elif len(matching) == 1: + self._resource_data = matching[0] + self._resource_data['state'] = "present" + + return self.pre_transform(self._resource_data) + + def create(self, resource, data=None): + # Fail if UUID/ID was provided but the resource was not found on state=present. + uuid = self._module.params.get(self.resource_key_uuid) + if uuid is not None: + self._module.fail_json(msg="The resource with UUID '%s' was not found " + "and we would create a new one with different UUID, " + "this is probably not want you have asked for." % uuid) + + self._result['changed'] = True + + if not data: + data = dict() + + for param in self.resource_create_param_keys: + data[param] = self._module.params.get(param) + + self._result['diff']['before'] = deepcopy(resource) + self._result['diff']['after'] = deepcopy(resource) + self._result['diff']['after'].update(deepcopy(data)) + self._result['diff']['after'].update({ + 'state': "present", + }) + + if not self._module.check_mode: + resource = self._post(self.resource_name, data) + resource = self.pre_transform(resource) + resource['state'] = "present" + return resource + + def update(self, resource): + updated = False + for param in self.resource_update_param_keys: + updated = self._param_updated(param, resource) or updated + + # Refresh if resource was updated in live mode + if updated and not self._module.check_mode: + resource = self.query() + return resource + + def present(self): + resource = self.query() + + if self.use_tag_for_name: + name_tag_value = self._module.params[self.resource_key_name] or resource.get('tags', dict()).get(self.resource_name_tag) + if name_tag_value: + self._module.params['tags'] = self._module.params['tags'] or dict() + self._module.params['tags'].update({ + self.resource_name_tag: name_tag_value + }) + + if resource['state'] == "absent": + resource = self.create(resource) + else: + resource = 
self.update(resource) + return self.get_result(resource) + + def absent(self): + resource = self.query() + if resource['state'] != "absent": + self._result['changed'] = True + self._result['diff']['before'] = deepcopy(resource) + self._result['diff']['after'] = self.init_resource() + + if not self._module.check_mode: + href = resource.get('href') + if not href: + self._module.fail_json(msg='Unable to delete %s, no href found.') + + self._delete(href) + resource['state'] = "absent" + return self.get_result(resource) + + def find_difference(self, key, resource, param): + is_different = False + + # If it looks like a stub + if isinstance(resource[key], dict) and 'href' in resource[key]: + uuid = resource[key].get('href', '').split('/')[-1] + if param != uuid: + is_different = True + + elif param != resource[key]: + is_different = True + + return is_different + + def _param_updated(self, key, resource): + param = self._module.params.get(key) + if param is None: + return False + + if not resource or key not in resource: + return False + + is_different = self.find_difference(key, resource, param) + + if is_different: + self._result['changed'] = True + + patch_data = { + key: param + } + + self._result['diff']['before'].update({key: resource[key]}) + self._result['diff']['after'].update(patch_data) + + if not self._module.check_mode: + href = resource.get('href') + if not href: + self._module.fail_json(msg='Unable to update %s, no href found.' 
% key) + + self._patch(href, patch_data) + return True + return False + + def get_result(self, resource): + if resource: + for k, v in resource.items(): + self._result[k] = v + + # Transform the name tag to a name field + if self.use_tag_for_name: + self._result['name'] = self._result.get('tags', dict()).pop(self.resource_name_tag, None) + + return self._result diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/__init__.py diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py new file mode 100644 index 00000000..f0bf52c3 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/custom_image.py @@ -0,0 +1,468 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021, Ciril Troxler <ciril.troxler@cloudscale.ch> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: custom_image +short_description: Manage custom images on the cloudscale.ch IaaS service +description: + - Import, modify and delete custom images. +notes: + - To import a new custom-image the I(url) and I(name) options are required. +author: + - Ciril Troxler (@ctx) + - Gaudenz Steinlin (@gaudenz) +version_added: 2.2.0 +options: + url: + description: + - The URL used to download the image. + type: str + force_retry: + description: + - Retry the image import even if a failed import using the same name and + URL already exists. This is necessary to recover from download errors. + default: no + type: bool + name: + description: + - The human readable name of the custom image. 
Either name or UUID must + be present to change an existing image. + type: str + uuid: + description: + - The unique identifier of the custom image import. Either name or UUID + must be present to change an existing image. + type: str + slug: + description: + - A string identifying the custom image for use within the API. + type: str + user_data_handling: + description: + - How user_data will be handled when creating a server. There are + currently two options, "pass-through" and "extend-cloud-config". + type: str + choices: [ pass-through, extend-cloud-config ] + zones: + description: + - Specify zones in which the custom image will be available (e.g. C(lpg1) + or C(rma1)). + type: list + elements: str + source_format: + description: + - The file format of the image referenced in the url. Currently only raw + is supported. + type: str + firmware_type: + description: + - The firmware type that will be used for servers created + with this image. + type: str + choices: [ bios, uefi ] + default: bios + tags: + description: + - The tags assigned to the custom image. + type: dict + state: + description: State of the custom image. 
+ choices: [ present, absent ] + default: present + type: str +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = r''' +- name: Import custom image + cloudscale_ch.cloud.custom_image: + name: "My Custom Image" + url: https://ubuntu.com/downloads/hirsute.img + slug: my-custom-image + user_data_handling: extend-cloud-config + zones: lpg1 + tags: + project: luna + state: present + register: my_custom_image + +- name: Wait until import succeeded + cloudscale_ch.cloud.custom_image: + uuid: "{{ my_custom_image.uuid }}" + retries: 15 + delay: 5 + register: image + until: image.import_status == 'success' + failed_when: image.import_status == 'failed' + +- name: Import custom image and wait until import succeeded + cloudscale_ch.cloud.custom_image: + name: "My Custom Image" + url: https://ubuntu.com/downloads/hirsute.img + slug: my-custom-image + user_data_handling: extend-cloud-config + zones: lpg1 + tags: + project: luna + state: present + retries: 15 + delay: 5 + register: image + until: image.import_status == 'success' + failed_when: image.import_status == 'failed' + +- name: Import custom image with UEFI firmware type + cloudscale_ch.cloud.custom_image: + name: "My Custom UEFI Image" + url: https://ubuntu.com/downloads/hirsute.img + slug: my-custom-uefi-image + user_data_handling: extend-cloud-config + zones: lpg1 + firmware_type: uefi + tags: + project: luna + state: present + register: my_custom_image + +- name: Update custom image + cloudscale_ch.cloud.custom_image: + name: "My Custom Image" + slug: my-custom-image + user_data_handling: extend-cloud-config + tags: + project: luna + state: present + +- name: Delete custom image + cloudscale_ch.cloud.custom_image: + uuid: '{{ my_custom_image.uuid }}' + state: absent + +- name: List all custom images + uri: + url: 'https://api.cloudscale.ch/v1/custom-images' + headers: + Authorization: 'Bearer {{ query("env", "CLOUDSCALE_API_TOKEN") }}' + status_code: 200 + register: image_list +- 
name: Search the image list for all images with name 'My Custom Image' + set_fact: + my_custom_images: '{{ image_list.json | selectattr("name","search", "My Custom Image" ) }}' +''' + +RETURN = r''' +href: + description: The API URL to get details about this resource. + returned: success when state == present + type: str + sample: https://api.cloudscale.ch/v1/custom-images/11111111-1864-4608-853a-0771b6885a3a +uuid: + description: The unique identifier of the custom image. + returned: success + type: str + sample: 11111111-1864-4608-853a-0771b6885a3a +name: + description: The human readable name of the custom image. + returned: success + type: str + sample: alan +created_at: + description: The creation date and time of the resource. + returned: success + type: str + sample: "2020-05-29T13:18:42.511407Z" +slug: + description: A string identifying the custom image for use within the API. + returned: success + type: str + sample: foo +checksums: + description: The checksums of the custom image as key and value pairs. The + algorithm (e.g. sha256) name is in the key and the checksum in the value. + The set of algorithms used might change in the future. + returned: success + type: dict + sample: { + "md5": "5b3a1f21cde154cfb522b582f44f1a87", + "sha256": "5b03bcbd00b687e08791694e47d235a487c294e58ca3b1af704120123aa3f4e6" + } +user_data_handling: + description: How user_data will be handled when creating a server. There are + currently two options, "pass-through" and "extend-cloud-config". + returned: success + type: str + sample: "pass-through" +tags: + description: Tags associated with the custom image. + returned: success + type: dict + sample: { 'project': 'my project' } +import_status: + description: Shows the progress of an import. Values are one of + "started", "in_progress", "success" or "failed". + returned: success + type: str + sample: "in_progress" +error_message: + description: Error message in case of a failed import. 
+ returned: success + type: str + sample: "Expected HTTP 200, got HTTP 403" +state: + description: The current status of the custom image. + returned: success + type: str + sample: present +''' + + +from ansible.module_utils.basic import ( + AnsibleModule, +) +from ansible.module_utils.urls import ( + fetch_url +) +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) +from ansible.module_utils._text import ( + to_text +) + + +class AnsibleCloudscaleCustomImage(AnsibleCloudscaleBase): + + def _transform_import_to_image(self, imp): + # Create a stub image from the import + img = imp.get('custom_image', {}) + return { + 'href': img.get('href'), + 'uuid': imp['uuid'], + 'name': img.get('name'), + 'created_at': None, + 'size_gb': None, + 'checksums': None, + 'tags': imp['tags'], + 'url': imp['url'], + 'import_status': imp['status'], + 'error_message': imp.get('error_message', ''), + # Even failed image imports are reported as present. This then + # represents a failed import resource. + 'state': 'present', + # These fields are not present on the import, assume they are + # unchanged from the module parameters + 'user_data_handling': self._module.params['user_data_handling'], + 'zones': self._module.params['zones'], + 'slug': self._module.params['slug'], + 'firmware_type': self._module.params['firmware_type'], + } + + # This method can be replaced by calling AnsibleCloudscaleBase._get from + # AnsibleCloudscaleCustomImage._get once the API bug is fixed. 
+ def _get_url(self, url): + + response, info = fetch_url(self._module, + url, + headers=self._auth_header, + method='GET', + timeout=self._module.params['api_timeout']) + + if info['status'] == 200: + response = self._module.from_json( + to_text(response.read(), + errors='surrogate_or_strict'), + ) + elif info['status'] == 404: + # Return None to be compatible with AnsibleCloudscaleBase._get + response = None + elif info['status'] == 500 and url.startswith(self._api_url + self.resource_name + '/import/'): + # Workaround a bug in the cloudscale.ch API which wrongly returns + # 500 instead of 404 + response = None + else: + self._module.fail_json( + msg='Failure while calling the cloudscale.ch API with GET for ' + '"%s"' % url, + fetch_url_info=info, + ) + + return response + + def _get(self, api_call): + + # Split api_call into components + api_url, call_uuid = api_call.split(self.resource_name) + + # If the api_call does not contain the API URL + if not api_url: + api_url = self._api_url + + # Fetch image(s) from the regular API endpoint + response = self._get_url(api_url + self.resource_name + call_uuid) or [] + + # Additionally fetch image(s) from the image import API endpoint + response_import = self._get_url( + api_url + self.resource_name + '/import' + call_uuid, + ) or [] + + # No image was found + if call_uuid and response == [] and response_import == []: + return None + + # Convert single image responses (call with UUID) into a list + if call_uuid and response: + response = [response] + if call_uuid and response_import: + response_import = [response_import] + + # Transform lists into UUID keyed dicts + response = dict([(i['uuid'], i) for i in response]) + response_import = dict([(i['uuid'], i) for i in response_import]) + + # Filter the import list so that successful and in_progress imports + # shadow failed imports + response_import_filtered = dict([(k, v) for k, v + in response_import.items() + if v['status'] in ('success', + 'in_progress')]) + # Only 
add failed imports if no import with the same name exists + # Only add the last failed import in the list (there is no timestamp on + # imports) + import_names = set([v['custom_image']['name'] for k, v + in response_import_filtered.items()]) + for k, v in reversed(list(response_import.items())): + name = v['custom_image']['name'] + if (v['status'] == 'failed' and name not in import_names): + import_names.add(name) + response_import_filtered[k] = v + + # Merge import list into image list + for uuid, imp in response_import_filtered.items(): + if uuid in response: + # Merge additional fields only present on the import + response[uuid].update( + url=imp['url'], + import_status=imp['status'], + error_message=imp.get('error_message', ''), + ) + else: + response[uuid] = self._transform_import_to_image(imp) + + if not call_uuid: + return response.values() + else: + return next(iter(response.values())) + + def _post(self, api_call, data=None): + # Only new image imports are supported, no direct POST calls to image + # resources are supported by the API + if not api_call.endswith('custom-images'): + self._module.fail_json(msg="Error: Bad api_call URL.") + # Custom image imports use a different endpoint + api_call += '/import' + + if self._module.params['url']: + return self._transform_import_to_image( + self._post_or_patch("%s" % api_call, 'POST', data), + ) + else: + self._module.fail_json(msg="Cannot import a new image without url.") + + def present(self): + resource = self.query() + + # If the module passes the firmware_type argument, + # and the module argument and API response are not the same for + # argument firmware_type. + if (resource.get('firmware_type') is not None + and resource.get('firmware_type') != + self._module.params['firmware_type']): + # Custom error if the module tries to change the firmware_type. 
+ msg = "Cannot change firmware type of an existing custom image" + self._module.fail_json(msg) + + if resource['state'] == "absent": + resource = self.create(resource) + else: + # If this is a failed upload and the URL changed or the "force_retry" + # parameter is used, create a new image import. + if (resource.get('import_status') == 'failed' + and (resource['url'] != self._module.params['url'] + or self._module.params['force_retry'])): + resource = self.create(resource) + else: + resource = self.update(resource) + + return self.get_result(resource) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + name=dict(type='str'), + slug=dict(type='str'), + url=dict(type='str'), + force_retry=dict(type='bool', default=False), + user_data_handling=dict(type='str', + choices=('pass-through', + 'extend-cloud-config')), + uuid=dict(type='str'), + firmware_type=dict(type='str', + choices=('bios', + 'uefi'), + default=('bios')), + tags=dict(type='dict'), + state=dict(type='str', default='present', + choices=('present', 'absent')), + zones=dict(type='list', elements='str'), + source_format=dict(type='str'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + supports_check_mode=True, + ) + + cloudscale_custom_image = AnsibleCloudscaleCustomImage( + module, + resource_name='custom-images', + resource_key_uuid='uuid', + resource_key_name='name', + resource_create_param_keys=[ + 'name', + 'slug', + 'url', + 'user_data_handling', + 'firmware_type', + 'tags', + 'zones', + 'source_format', + ], + resource_update_param_keys=[ + 'name', + 'slug', + 'user_data_handling', + 'firmware_type', + 'tags', + ], + ) + + if module.params['state'] == "absent": + result = cloudscale_custom_image.absent() + else: + result = cloudscale_custom_image.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py new file mode 100644 index 00000000..7f578d18 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/floating_ip.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: floating_ip +short_description: Manages floating IPs on the cloudscale.ch IaaS service +description: + - Create, assign and delete floating IPs on the cloudscale.ch IaaS service. +notes: + - Once a floating_ip is created, all parameters except C(server), C(reverse_ptr) and C(tags) are read-only. +author: + - Gaudenz Steinlin (@gaudenz) + - Denis Krienbühl (@href) + - René Moser (@resmo) +version_added: 1.0.0 +options: + network: + description: + - Floating IP address to change. + - One of I(network) or I(name) is required to identify the floating IP. + aliases: [ ip ] + type: str + name: + description: + - Name to identifiy the floating IP address for idempotency. + - One of I(network) or I(name) is required to identify the floating IP. + - Required for assigning a new floating IP. + version_added: 1.3.0 + type: str + state: + description: + - State of the floating IP. + default: present + choices: [ present, absent ] + type: str + ip_version: + description: + - IP protocol version of the floating IP. + - Required when assigning a new floating IP. + choices: [ 4, 6 ] + type: int + server: + description: + - UUID of the server assigned to this floating IP. + type: str + type: + description: + - The type of the floating IP. 
+ choices: [ regional, global ] + type: str + default: regional + region: + description: + - Region in which the floating IP resides (e.g. C(lpg) or C(rma)). + If omitted, the region of the project default zone is used. + This parameter must be omitted if I(type) is set to C(global). + type: str + prefix_length: + description: + - Only valid if I(ip_version) is 6. + - Prefix length for the IPv6 network. Currently only a prefix of /56 can be requested. If no I(prefix_length) is present, a + single address is created. + choices: [ 56 ] + type: int + reverse_ptr: + description: + - Reverse PTR entry for this address. + - You cannot set a reverse PTR entry for IPv6 floating networks. Reverse PTR entries are only allowed for single addresses. + type: str + tags: + description: + - Tags associated with the floating IP. Set this to C({}) to clear any tags. + type: dict + version_added: 1.1.0 +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +# Request a new floating IP without assignment to a server +- name: Request a floating IP + cloudscale_ch.cloud.floating_ip: + name: IP to my server + ip_version: 4 + reverse_ptr: my-server.example.com + api_token: xxxxxx + +# Request a new floating IP with assignment +- name: Request a floating IP + cloudscale_ch.cloud.floating_ip: + name: web + ip_version: 4 + server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 + reverse_ptr: my-server.example.com + api_token: xxxxxx + +# Assign an existing floating IP to a different server by its IP address +- name: Move floating IP to backup server + cloudscale_ch.cloud.floating_ip: + ip: 192.0.2.123 + server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Assign an existing floating IP to a different server by name +- name: Move floating IP to backup server + cloudscale_ch.cloud.floating_ip: + name: IP to my server + server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Request a new floating IPv6 network +- name: Request a floating 
IP + cloudscale_ch.cloud.floating_ip: + name: IPv6 to my server + ip_version: 6 + prefix_length: 56 + server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 + api_token: xxxxxx + region: lpg1 + +# Assign an existing floating network to a different server +- name: Move floating IP to backup server + cloudscale_ch.cloud.floating_ip: + ip: '{{ floating_ip.ip }}' + server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Remove a floating IP +- name: Release floating IP + cloudscale_ch.cloud.floating_ip: + ip: 192.0.2.123 + state: absent + api_token: xxxxxx + +# Remove a floating IP by name +- name: Release floating IP + cloudscale_ch.cloud.floating_ip: + name: IP to my server + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +name: + description: The name of the floating IP. + returned: success + type: str + sample: my floating ip + version_added: 1.3.0 +href: + description: The API URL to get details about this floating IP. + returned: success when state == present + type: str + sample: https://api.cloudscale.ch/v1/floating-ips/2001:db8::cafe +network: + description: The CIDR notation of the network that is routed to your server. + returned: success + type: str + sample: 2001:db8::cafe/128 +next_hop: + description: Your floating IP is routed to this IP address. + returned: success when state == present + type: str + sample: 2001:db8:dead:beef::42 +reverse_ptr: + description: The reverse pointer for this floating IP address. + returned: success when state == present + type: str + sample: 185-98-122-176.cust.cloudscale.ch +server: + description: The floating IP is routed to this server. + returned: success when state == present + type: str + sample: 47cec963-fcd2-482f-bdb6-24461b2d47b1 +ip: + description: The floating IP address. + returned: success when state == present + type: str + sample: 185.98.122.176 +region: + description: The region of the floating IP. 
+ returned: success when state == present + type: dict + sample: {'slug': 'lpg'} +state: + description: The current status of the floating IP. + returned: success + type: str + sample: present +tags: + description: Tags assosiated with the floating IP. + returned: success + type: dict + sample: { 'project': 'my project' } + version_added: 1.1.0 +''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +class AnsibleCloudscaleFloatingIp(AnsibleCloudscaleBase): + + def __init__(self, module): + super(AnsibleCloudscaleFloatingIp, self).__init__( + module=module, + resource_key_uuid='network', + resource_name='floating-ips', + resource_create_param_keys=[ + 'ip_version', + 'server', + 'prefix_length', + 'reverse_ptr', + 'type', + 'region', + 'tags', + ], + resource_update_param_keys=[ + 'server', + 'reverse_ptr', + 'tags', + ], + ) + self.use_tag_for_name = True + self.query_constraint_keys = ['ip_version'] + + def pre_transform(self, resource): + if 'server' in resource and isinstance(resource['server'], dict): + resource['server'] = resource['server']['uuid'] + return resource + + def create(self, resource): + # Fail when missing params for creation + self._module.fail_on_missing_params(['ip_version', 'name']) + return super(AnsibleCloudscaleFloatingIp, self).create(resource) + + def get_result(self, resource): + network = resource.get('network') + if network: + self._result['ip'] = network.split('/')[0] + return super(AnsibleCloudscaleFloatingIp, self).get_result(resource) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + name=dict(type='str'), + state=dict(default='present', choices=('present', 'absent'), type='str'), + network=dict(aliases=('ip',), type='str'), + ip_version=dict(choices=(4, 6), type='int'), + server=dict(type='str'), + type=dict(type='str', choices=('regional', 'global'), default='regional'), + 
region=dict(type='str'), + prefix_length=dict(choices=(56,), type='int'), + reverse_ptr=dict(type='str'), + tags=dict(type='dict'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('network', 'name'),), + supports_check_mode=True, + ) + + cloudscale_floating_ip = AnsibleCloudscaleFloatingIp(module) + + if module.params['state'] == 'absent': + result = cloudscale_floating_ip.absent() + else: + result = cloudscale_floating_ip.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py new file mode 100644 index 00000000..7b1da5b2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/network.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2020, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: network +short_description: Manages networks on the cloudscale.ch IaaS service +description: + - Create, update and remove networks. +author: + - René Moser (@resmo) +version_added: "1.2.0" +options: + name: + description: + - Name of the network. + - Either I(name) or I(uuid) is required. + type: str + uuid: + description: + - UUID of the network. + - Either I(name) or I(uuid) is required. + type: str + mtu: + description: + - The MTU of the network. + default: 9000 + type: int + auto_create_ipv4_subnet: + description: + - Whether to automatically create an IPv4 subnet in the network or not. + default: true + type: bool + zone: + description: + - Zone slug of the network (e.g. C(lpg1) or C(rma1)). + type: str + state: + description: + - State of the network. 
+ choices: [ present, absent ] + default: present + type: str + tags: + description: + - Tags assosiated with the networks. Set this to C({}) to clear any tags. + type: dict +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +--- +- name: Ensure network exists + cloudscale_ch.cloud.network: + name: my network + api_token: xxxxxx + +- name: Ensure network in a specific zone + cloudscale_ch.cloud.network: + name: my network + zone: lpg1 + api_token: xxxxxx + +- name: Ensure a network is absent + cloudscale_ch.cloud.network: + name: my network + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +--- +href: + description: API URL to get details about this network. + returned: success + type: str + sample: https://api.cloudscale.ch/v1/networks/cfde831a-4e87-4a75-960f-89b0148aa2cc +uuid: + description: The unique identifier for the network. + returned: success + type: str + sample: cfde831a-4e87-4a75-960f-89b0148aa2cc +name: + description: The name of the network. + returned: success + type: str + sample: my network +created_at: + description: The creation date and time of the network. + returned: success + type: str + sample: "2019-05-29T13:18:42.511407Z" +subnets: + description: A list of subnets objects of the network. + returned: success + type: complex + contains: + href: + description: API URL to get details about the subnet. + returned: success + type: str + sample: https://api.cloudscale.ch/v1/subnets/33333333-1864-4608-853a-0771b6885a3 + uuid: + description: The unique identifier for the subnet. + returned: success + type: str + sample: 33333333-1864-4608-853a-0771b6885a3 + cidr: + description: The CIDR of the subnet. + returned: success + type: str + sample: 172.16.0.0/24 +mtu: + description: The MTU of the network. + returned: success + type: int + sample: 9000 +zone: + description: The zone of the network. + returned: success + type: dict + sample: { 'slug': 'rma1' } +state: + description: State of the network. 
+ returned: success + type: str + sample: present +tags: + description: Tags assosiated with the network. + returned: success + type: dict + sample: { 'project': 'my project' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + name=dict(type='str'), + uuid=dict(type='str'), + mtu=dict(type='int', default=9000), + auto_create_ipv4_subnet=dict(type='bool', default=True), + zone=dict(type='str'), + tags=dict(type='dict'), + state=dict(default='present', choices=['absent', 'present']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + required_if=(('state', 'present', ('name',),),), + supports_check_mode=True, + ) + + cloudscale_network = AnsibleCloudscaleBase( + module, + resource_name='networks', + resource_create_param_keys=[ + 'name', + 'mtu', + 'auto_create_ipv4_subnet', + 'zone', + 'tags', + ], + resource_update_param_keys=[ + 'name', + 'mtu', + 'tags', + ], + ) + + cloudscale_network.query_constraint_keys = [ + 'zone', + ] + + if module.params['state'] == 'absent': + result = cloudscale_network.absent() + else: + result = cloudscale_network.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py new file mode 100644 index 00000000..e6deae7e --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/objects_user.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2020, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
+DOCUMENTATION = r''' +--- +module: objects_user +short_description: Manages objects users on the cloudscale.ch IaaS service +description: + - Create, update and remove objects users cloudscale.ch IaaS service. +author: + - Rene Moser (@resmo) +version_added: 1.1.0 +options: + display_name: + description: + - Display name of the objects user. + - Either I(display_name) or I(id) is required. + type: str + aliases: + - name + id: + description: + - Name of the objects user. + - Either I(display_name) or I(id) is required. + type: str + tags: + description: + - Tags associated with the objects user. Set this to C({}) to clear any tags. + type: dict + state: + description: + - State of the objects user. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = r''' +- name: Create an objects user + cloudscale_ch.cloud.objects_user: + display_name: alan + tags: + project: luna + api_token: xxxxxx + register: object_user + +- name: print keys + debug: + var: object_user.keys + +- name: Update an objects user + cloudscale_ch.cloud.objects_user: + display_name: alan + tags: + project: gemini + api_token: xxxxxx + +- name: Remove an objects user + cloudscale_ch.cloud.objects_user: + display_name: alan + state: absent + api_token: xxxxxx +''' + +RETURN = r''' +href: + description: The API URL to get details about this resource. + returned: success when state == present + type: str + sample: https://api.cloudscale.ch/v1/objects-users/6fe39134bf4178747eebc429f82cfafdd08891d4279d0d899bc4012db1db6a15 +display_name: + description: The display name of the objects user. + returned: success + type: str + sample: alan +id: + description: The ID of the objects user. + returned: success + type: str + sample: 6fe39134bf4178747eebc429f82cfafdd08891d4279d0d899bc4012db1db6a15 +keys: + description: List of key objects. 
+ returned: success + type: complex + contains: + access_key: + description: The access key. + returned: success + type: str + sample: 0ZTAIBKSGYBRHQ09G11W + secret_key: + description: The secret key. + returned: success + type: str + sample: bn2ufcwbIa0ARLc5CLRSlVaCfFxPHOpHmjKiH34T +tags: + description: Tags assosiated with the objects user. + returned: success + type: dict + sample: { 'project': 'my project' } +state: + description: The current status of the objects user. + returned: success + type: str + sample: present +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + display_name=dict(type='str', aliases=['name']), + id=dict(type='str'), + tags=dict(type='dict'), + state=dict(type='str', default='present', choices=('present', 'absent')), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('display_name', 'id'),), + required_if=(('state', 'present', ('display_name',),),), + supports_check_mode=True, + ) + + cloudscale_objects_user = AnsibleCloudscaleBase( + module, + resource_name='objects-users', + resource_key_uuid='id', + resource_key_name='display_name', + resource_create_param_keys=[ + 'display_name', + 'tags', + ], + resource_update_param_keys=[ + 'display_name', + 'tags', + ], + ) + + if module.params['state'] == "absent": + result = cloudscale_objects_user.absent() + else: + result = cloudscale_objects_user.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py new file mode 100644 index 00000000..d2b07feb --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py @@ -0,0 +1,737 @@ +#!/usr/bin/python 
+# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> +# Copyright: (c) 2019, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: server +short_description: Manages servers on the cloudscale.ch IaaS service +description: + - Create, update, start, stop and delete servers on the cloudscale.ch IaaS service. +notes: + - If I(uuid) option is provided, it takes precedence over I(name) for server selection. This allows to update the server's name. + - If no I(uuid) option is provided, I(name) is used for server selection. If more than one server with this name exists, execution is aborted. + - Only the I(name) and I(flavor) are evaluated for the update. + - The option I(force=true) must be given to allow the reboot of existing running servers for applying the changes. +author: + - Gaudenz Steinlin (@gaudenz) + - René Moser (@resmo) + - Denis Krienbühl (@href) +version_added: "1.0.0" +options: + state: + description: + - State of the server. + choices: [ running, stopped, absent ] + default: running + type: str + name: + description: + - Name of the Server. + - Either I(name) or I(uuid) are required. + type: str + uuid: + description: + - UUID of the server. + - Either I(name) or I(uuid) are required. + type: str + flavor: + description: + - Flavor of the server. + type: str + image: + description: + - Image used to create the server. + type: str + zone: + description: + - Zone in which the server resides (e.g. C(lpg1) or C(rma1)). + type: str + volume_size_gb: + description: + - Size of the root volume in GB. + default: 10 + type: int + bulk_volume_size_gb: + description: + - Size of the bulk storage volume in GB. + - No bulk storage volume if not set. + type: int + ssh_keys: + description: + - List of SSH public keys. 
+ - Use the full content of your .pub file here. + type: list + elements: str + password: + description: + - Password for the server. + type: str + use_public_network: + description: + - Attach a public network interface to the server. + type: bool + use_private_network: + description: + - Attach a private network interface to the server. + type: bool + use_ipv6: + description: + - Enable IPv6 on the public network interface. + default: yes + type: bool + interfaces: + description: + - List of network interface objects specifying the interfaces to be attached to the server. + See U(https://www.cloudscale.ch/en/api/v1/#interfaces-attribute-specification) for more details. + type: list + elements: dict + version_added: 1.4.0 + suboptions: + network: + description: + - Create a network interface on the network identified by UUID. + Use 'public' instead of an UUID to attach a public network interface. + Can be omitted if a subnet is provided under addresses. + type: str + addresses: + description: + - Attach a private network interface and configure a subnet and/or an IP address. + type: list + elements: dict + suboptions: + subnet: + description: + - UUID of the subnet from which an address will be assigned. + type: str + address: + description: + - The static IP address of the interface. Use '[]' to avoid assigning an IP address via DHCP. + type: str + server_groups: + description: + - List of UUID or names of server groups. + type: list + elements: str + user_data: + description: + - Cloud-init configuration (cloud-config) data to use for the server. + type: str + force: + description: + - Allow to stop the running server for updating if necessary. + default: no + type: bool + tags: + description: + - Tags assosiated with the servers. Set this to C({}) to clear any tags. 
+ type: dict +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +# Create and start a server with an existing server group (shiny-group) +- name: Start cloudscale.ch server + cloudscale_ch.cloud.server: + name: my-shiny-cloudscale-server + image: debian-10 + flavor: flex-4-4 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + server_groups: shiny-group + zone: lpg1 + use_private_network: True + bulk_volume_size_gb: 100 + api_token: xxxxxx + +# Start another server in anti-affinity (server group shiny-group) +- name: Start second cloudscale.ch server + cloudscale_ch.cloud.server: + name: my-other-shiny-server + image: ubuntu-16.04 + flavor: flex-8-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + server_groups: shiny-group + zone: lpg1 + api_token: xxxxxx + +# Force to update the flavor of a running server +- name: Start cloudscale.ch server + cloudscale_ch.cloud.server: + name: my-shiny-cloudscale-server + image: debian-10 + flavor: flex-8-2 + force: yes + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + use_private_network: True + bulk_volume_size_gb: 100 + api_token: xxxxxx + register: server1 + +# Stop the first server +- name: Stop my first server + cloudscale_ch.cloud.server: + uuid: '{{ server1.uuid }}' + state: stopped + api_token: xxxxxx + +# Delete my second server +- name: Delete my second server + cloudscale_ch.cloud.server: + name: my-other-shiny-server + state: absent + api_token: xxxxxx + +# Start a server and wait for the SSH host keys to be generated +- name: Start server and wait for SSH host keys + cloudscale_ch.cloud.server: + name: my-cloudscale-server-with-ssh-key + image: debian-10 + flavor: flex-4-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + api_token: xxxxxx + register: server + until: server is not failed + retries: 5 + delay: 2 + +# Start a server with two network interfaces: +# +# A public interface with IPv4/IPv6 +# A private interface on a 
specific private network with an IPv4 address + +- name: Start a server with a public and private network interface + cloudscale_ch.cloud.server: + name: my-cloudscale-server-with-two-network-interfaces + image: debian-10 + flavor: flex-4-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + api_token: xxxxxx + interfaces: + - network: 'public' + - addresses: + - subnet: UUID_of_private_subnet + +# Start a server with a specific IPv4 address from subnet range +- name: Start a server with a specific IPv4 address from subnet range + cloudscale_ch.cloud.server: + name: my-cloudscale-server-with-specific-address + image: debian-10 + flavor: flex-4-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + api_token: xxxxxx + interfaces: + - addresses: + - subnet: UUID_of_private_subnet + address: 'A.B.C.D' + +# Start a server with two network interfaces: +# +# A public interface with IPv4/IPv6 +# A private interface on a specific private network with no IPv4 address + +- name: Start a server with a private network interface and no IP address + cloudscale_ch.cloud.server: + name: my-cloudscale-server-with-specific-address + image: debian-10 + flavor: flex-4-2 + ssh_keys: + - ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + api_token: xxxxxx + interfaces: + - network: 'public' + - network: UUID_of_private_network + addresses: [] +''' + +RETURN = ''' +href: + description: API URL to get details about this server + returned: success when not state == absent + type: str + sample: https://api.cloudscale.ch/v1/servers/cfde831a-4e87-4a75-960f-89b0148aa2cc +uuid: + description: The unique identifier for this server + returned: success + type: str + sample: cfde831a-4e87-4a75-960f-89b0148aa2cc +name: + description: The display name of the server + returned: success + type: str + sample: its-a-me-mario.cloudscale.ch +state: + description: The current status of the server + returned: success + type: str + sample: running +flavor: + description: The flavor that has 
been used for this server + returned: success when not state == absent + type: dict + sample: { "slug": "flex-4-2", "name": "Flex-4-2", "vcpu_count": 2, "memory_gb": 4 } +image: + description: The image used for booting this server + returned: success when not state == absent + type: dict + sample: { "default_username": "ubuntu", "name": "Ubuntu 18.04 LTS", "operating_system": "Ubuntu", "slug": "ubuntu-18.04" } +zone: + description: The zone used for booting this server + returned: success when not state == absent + type: dict + sample: { 'slug': 'lpg1' } +volumes: + description: List of volumes attached to the server + returned: success when not state == absent + type: list + sample: [ {"type": "ssd", "device": "/dev/vda", "size_gb": "50"} ] +interfaces: + description: List of network ports attached to the server + returned: success when not state == absent + type: list + sample: [ { "type": "public", "addresses": [ ... ] } ] +ssh_fingerprints: + description: A list of SSH host key fingerprints. Will be null until the host keys could be retrieved from the server. + returned: success when not state == absent + type: list + sample: ["ecdsa-sha2-nistp256 SHA256:XXXX", ... ] +ssh_host_keys: + description: A list of SSH host keys. Will be null until the host keys could be retrieved from the server. + returned: success when not state == absent + type: list + sample: ["ecdsa-sha2-nistp256 XXXXX", ... ] +server_groups: + description: List of server groups + returned: success when not state == absent + type: list + sample: [ {"href": "https://api.cloudscale.ch/v1/server-groups/...", "uuid": "...", "name": "db-group"} ] +tags: + description: Tags assosiated with the server. 
  returned: success
  type: dict
  sample: { 'project': 'my project' }
'''

from datetime import datetime, timedelta
from time import sleep
from copy import deepcopy

from ansible.module_utils.basic import AnsibleModule
from ..module_utils.api import (
    AnsibleCloudscaleBase,
    cloudscale_argument_spec,
)

# Valid values for the module's 'state' parameter
ALLOWED_STATES = ('running',
                  'stopped',
                  'absent',
                  )


class AnsibleCloudscaleServer(AnsibleCloudscaleBase):
    """Server resource handling (create/update/start/stop) for cloudscale.ch."""

    def __init__(self, module):
        super(AnsibleCloudscaleServer, self).__init__(module)

        # Initialize server dictionary
        self._info = {}

    def _init_server_container(self):
        # Minimal server representation used before/without API data;
        # 'state' is replaced once real data is fetched.
        return {
            'uuid': self._module.params.get('uuid') or self._info.get('uuid'),
            'name': self._module.params.get('name') or self._info.get('name'),
            'state': 'absent',
        }

    def _get_server_info(self, refresh=False):
        """Return cached server info, re-querying the API when refresh=True.

        Looks the server up by UUID when given, otherwise by name; fails
        the module when the name matches more than one server.
        """
        if self._info and not refresh:
            return self._info

        self._info = self._init_server_container()

        uuid = self._info.get('uuid')
        if uuid is not None:
            server_info = self._get('servers/%s' % uuid)
            if server_info:
                self._info = self._transform_state(server_info)

        else:
            name = self._info.get('name')
            if name is not None:
                servers = self._get('servers') or []
                matching_server = []
                for server in servers:
                    if server['name'] == name:
                        matching_server.append(server)

                if len(matching_server) == 1:
                    self._info = self._transform_state(matching_server[0])
                elif len(matching_server) > 1:
                    self._module.fail_json(msg="More than one server with name '%s' exists. "
                                               "Use the 'uuid' parameter to identify the server." % name)

        return self._info

    @staticmethod
    def _transform_state(server):
        # The API reports 'status'; the module exposes it as 'state' and
        # treats a missing status as 'absent'.
        if 'status' in server:
            server['state'] = server['status']
            del server['status']
        else:
            server['state'] = 'absent'
        return server

    def _wait_for_state(self, states):
        """Poll the API until the server reaches one of ``states``.

        Polls once per second for up to twice the configured api_timeout,
        then fails the module with a timeout message.
        """
        start = datetime.now()
        timeout = self._module.params['api_timeout'] * 2
        while datetime.now() - start < timedelta(seconds=timeout):
            server_info = self._get_server_info(refresh=True)
            if server_info.get('state') in states:
                return server_info
            sleep(1)

        # Timeout succeeded
        if server_info.get('name') is not None:
            msg = "Timeout while waiting for a state change on server %s to states %s. " \
                  "Current state is %s." % (server_info.get('name'), states, server_info.get('state'))
        else:
            name_uuid = self._module.params.get('name') or self._module.params.get('uuid')
            msg = 'Timeout while waiting to find the server %s' % name_uuid

        self._module.fail_json(msg=msg)

    def _start_stop_server(self, server_info, target_state="running", ignore_diff=False):
        """Bring the server to target_state ('running' or 'stopped').

        Records the change in the diff output unless ignore_diff is set
        (used for the implicit stop/start around forced updates).
        """
        actions = {
            'stopped': 'stop',
            'running': 'start',
        }

        server_state = server_info.get('state')
        if server_state != target_state:
            self._result['changed'] = True

            if not ignore_diff:
                self._result['diff']['before'].update({
                    'state': server_info.get('state'),
                })
                self._result['diff']['after'].update({
                    'state': target_state,
                })
            if not self._module.check_mode:
                self._post('servers/%s/%s' % (server_info['uuid'], actions[target_state]))
                server_info = self._wait_for_state((target_state, ))

        return server_info

    def _update_param(self, param_key, server_info, requires_stop=False):
        """Patch a single server attribute if it differs from the module param.

        When requires_stop is set and the server is running, the change is
        only applied with force=yes (the server is stopped first); otherwise
        a warning is emitted and the server is left unchanged.
        """
        param_value = self._module.params.get(param_key)
        if param_value is None:
            return server_info

        # Compare against the slug when the API returns an object
        # (e.g. flavor), otherwise against the raw value.
        if 'slug' in server_info[param_key]:
            server_v = server_info[param_key]['slug']
        else:
            server_v = server_info[param_key]

        if server_v != param_value:
            # Set the diff output
            self._result['diff']['before'].update({param_key: server_v})
            self._result['diff']['after'].update({param_key: param_value})

            if server_info.get('state') == "running":
                if requires_stop and not self._module.params.get('force'):
                    self._module.warn("Some changes won't be applied to running servers. "
                                      "Use force=yes to allow the server '%s' to be stopped/started." % server_info['name'])
                    return server_info

            # Either the server is stopped or change is forced
            self._result['changed'] = True
            if not self._module.check_mode:

                if requires_stop:
                    self._start_stop_server(server_info, target_state="stopped", ignore_diff=True)

                patch_data = {
                    param_key: param_value,
                }

                # Response is 204: No Content
                self._patch('servers/%s' % server_info['uuid'], patch_data)

                # State changes to "changing" after update, waiting for stopped/running
                server_info = self._wait_for_state(('stopped', 'running'))

        return server_info

    def _get_server_group_ids(self):
        """Resolve the server_groups parameter (names or UUIDs) to UUIDs.

        Returns None when the parameter is unset; fails the module on
        ambiguous group names or unresolvable entries.
        NOTE(review): mutates self._module.params['server_groups'] in place
        while resolving — callers appear to rely on calling this only once
        per run; confirm before reuse.
        """
        server_group_params = self._module.params['server_groups']
        if not server_group_params:
            return None

        matching_group_names = []
        results = []
        server_groups = self._get('server-groups')
        for server_group in server_groups:
            if server_group['uuid'] in server_group_params:
                results.append(server_group['uuid'])
                server_group_params.remove(server_group['uuid'])

            elif server_group['name'] in server_group_params:
                results.append(server_group['uuid'])
                server_group_params.remove(server_group['name'])
                # Remember the names found
                matching_group_names.append(server_group['name'])

            # Names are not unique, verify if name already found in previous iterations
            elif server_group['name'] in matching_group_names:
                self._module.fail_json(msg="More than one server group with name exists: '%s'. "
                                           "Use the 'uuid' parameter to identify the server group." % server_group['name'])

        if server_group_params:
            self._module.fail_json(msg="Server group name or UUID not found: %s" % ', '.join(server_group_params))

        return results

    def _create_server(self, server_info):
        """Create the server from the module parameters and wait for 'running'."""
        self._result['changed'] = True
        self.normalize_interfaces_param()

        # Build the POST payload from the module params, dropping keys the
        # create endpoint does not accept.
        data = deepcopy(self._module.params)
        for i in ('uuid', 'state', 'force', 'api_timeout', 'api_token', 'api_url'):
            del data[i]
        data['server_groups'] = self._get_server_group_ids()

        self._result['diff']['before'] = self._init_server_container()
        self._result['diff']['after'] = deepcopy(data)
        if not self._module.check_mode:
            self._post('servers', data)
            server_info = self._wait_for_state(('running', ))
        return server_info

    def _update_server(self, server_info):
        """Update mutable attributes of an existing server."""

        previous_state = server_info.get('state')

        # The API doesn't support to update server groups.
        # Show a warning to the user if the desired state does not match.
        desired_server_group_ids = self._get_server_group_ids()
        if desired_server_group_ids is not None:
            current_server_group_ids = [grp['uuid'] for grp in server_info['server_groups']]
            if desired_server_group_ids != current_server_group_ids:
                self._module.warn("Server groups can not be mutated, server needs redeployment to change groups.")

        # Remove interface properties that were not filled out by the user
        self.normalize_interfaces_param()

        # Compare the interfaces as specified by the user, with the interfaces
        # as received by the API.
The structures are somewhat different, so + # they need to be evaluated in detail + wanted = self._module.params.get('interfaces') + actual = server_info.get('interfaces') + + try: + update_interfaces = not self.has_wanted_interfaces(wanted, actual) + except KeyError as e: + self._module.fail_json( + msg="Error checking 'interfaces', missing key: %s" % e.args[0]) + + if update_interfaces: + server_info = self._update_param('interfaces', server_info) + + if not self._result['changed']: + self._result['changed'] = server_info['interfaces'] != actual + + server_info = self._update_param('flavor', server_info, requires_stop=True) + server_info = self._update_param('name', server_info) + server_info = self._update_param('tags', server_info) + + if previous_state == "running": + server_info = self._start_stop_server(server_info, target_state="running", ignore_diff=True) + + return server_info + + def present_server(self): + server_info = self._get_server_info() + + if server_info.get('state') != "absent": + + # If target state is stopped, stop before an potential update and force would not be required + if self._module.params.get('state') == "stopped": + server_info = self._start_stop_server(server_info, target_state="stopped") + + server_info = self._update_server(server_info) + + if self._module.params.get('state') == "running": + server_info = self._start_stop_server(server_info, target_state="running") + else: + server_info = self._create_server(server_info) + server_info = self._start_stop_server(server_info, target_state=self._module.params.get('state')) + + return server_info + + def absent_server(self): + server_info = self._get_server_info() + if server_info.get('state') != "absent": + self._result['changed'] = True + self._result['diff']['before'] = deepcopy(server_info) + self._result['diff']['after'] = self._init_server_container() + if not self._module.check_mode: + self._delete('servers/%s' % server_info['uuid']) + server_info = 
self._wait_for_state(('absent', )) + return server_info + + def has_wanted_interfaces(self, wanted, actual): + """ Compares the interfaces as specified by the user, with the + interfaces as reported by the server. + + """ + + if len(wanted or ()) != len(actual or ()): + return False + + def match_interface(spec): + + # First, find the interface that belongs to the spec + for interface in actual: + + # If we have a public network, only look for the right type + if spec.get('network') == 'public': + if interface['type'] == 'public': + break + + # If we have a private network, check the network's UUID + if spec.get('network') is not None: + if interface['type'] == 'private': + if interface['network']['uuid'] == spec['network']: + break + + # If we only have an addresses block, match all subnet UUIDs + wanted_subnet_ids = set( + a['subnet'] for a in (spec.get('addresses') or ())) + + actual_subnet_ids = set( + a['subnet']['uuid'] for a in interface['addresses']) + + if wanted_subnet_ids == actual_subnet_ids: + break + else: + return False # looped through everything without match + + # Fail if any of the addresses don't match + for wanted_addr in (spec.get('addresses') or ()): + + # Unspecified, skip + if 'address' not in wanted_addr: + continue + + addresses = set(a['address'] for a in interface['addresses']) + if wanted_addr['address'] not in addresses: + return False + + # If the wanted address is an empty list, but the actual list is + # not, the user wants to remove automatically set addresses + if spec.get('addresses') == [] and interface['addresses'] != []: + return False + + if interface['addresses'] == [] and spec.get('addresses') != []: + return False + + return interface + + for spec in wanted: + + # If there is any interface that does not match, clearly not all + # wanted interfaces are present + if not match_interface(spec): + return False + + return True + + def normalize_interfaces_param(self): + """ Goes through the interfaces parameter and gets it 
ready to be + sent to the API. """ + + for spec in (self._module.params.get('interfaces') or ()): + if spec['addresses'] is None: + del spec['addresses'] + if spec['network'] is None: + del spec['network'] + + for address in (spec.get('addresses') or ()): + if address['address'] is None: + del address['address'] + if address['subnet'] is None: + del address['subnet'] + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + state=dict(default='running', choices=ALLOWED_STATES), + name=dict(), + uuid=dict(), + flavor=dict(), + image=dict(), + zone=dict(), + volume_size_gb=dict(type='int', default=10), + bulk_volume_size_gb=dict(type='int'), + ssh_keys=dict(type='list', elements='str', no_log=False), + password=dict(no_log=True), + use_public_network=dict(type='bool'), + use_private_network=dict(type='bool'), + use_ipv6=dict(type='bool', default=True), + interfaces=dict( + type='list', + elements='dict', + options=dict( + network=dict(type='str'), + addresses=dict( + type='list', + elements='dict', + options=dict( + address=dict(type='str'), + subnet=dict(type='str'), + ), + ), + ), + ), + server_groups=dict(type='list', elements='str'), + user_data=dict(), + force=dict(type='bool', default=False), + tags=dict(type='dict'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=( + ['interfaces', 'use_public_network'], + ['interfaces', 'use_private_network'], + ), + required_one_of=(('name', 'uuid'),), + supports_check_mode=True, + ) + + cloudscale_server = AnsibleCloudscaleServer(module) + if module.params['state'] == "absent": + server = cloudscale_server.absent_server() + else: + server = cloudscale_server.present_server() + + result = cloudscale_server.get_result(server) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py new 
file mode 100644 index 00000000..f4dc9c31 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: server_group +short_description: Manages server groups on the cloudscale.ch IaaS service +description: + - Create, update and remove server groups. +author: + - René Moser (@resmo) + - Denis Krienbühl (@href) +version_added: "1.0.0" +options: + name: + description: + - Name of the server group. + - Either I(name) or I(uuid) is required. These options are mutually exclusive. + type: str + uuid: + description: + - UUID of the server group. + - Either I(name) or I(uuid) is required. These options are mutually exclusive. + type: str + type: + description: + - Type of the server group. + default: anti-affinity + type: str + zone: + description: + - Zone slug of the server group (e.g. C(lpg1) or C(rma1)). + type: str + state: + description: + - State of the server group. + choices: [ present, absent ] + default: present + type: str + tags: + description: + - Tags associated with the server group. Set this to C({}) to clear any tags. 
+ type: dict +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +--- +- name: Ensure server group exists + cloudscale_ch.cloud.server_group: + name: my-name + type: anti-affinity + api_token: xxxxxx + +- name: Ensure server group in a specific zone + cloudscale_ch.cloud.server_group: + name: my-rma-group + type: anti-affinity + zone: lpg1 + api_token: xxxxxx + +- name: Ensure a server group is absent + cloudscale_ch.cloud.server_group: + name: my-name + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +--- +href: + description: API URL to get details about this server group + returned: if available + type: str + sample: https://api.cloudscale.ch/v1/server-group/cfde831a-4e87-4a75-960f-89b0148aa2cc +uuid: + description: The unique identifier for this server group + returned: always + type: str + sample: cfde831a-4e87-4a75-960f-89b0148aa2cc +name: + description: The display name of the server group + returned: always + type: str + sample: load balancers +type: + description: The type of the server group + returned: if available + type: str + sample: anti-affinity +zone: + description: The zone of the server group + returned: success + type: dict + sample: { 'slug': 'rma1' } +servers: + description: A list of servers that are part of the server group. + returned: if available + type: list + sample: [] +state: + description: State of the server group. + returned: always + type: str + sample: present +tags: + description: Tags associated with the server group. 
+ returned: success + type: dict + sample: { 'project': 'my project' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + name=dict(type='str'), + uuid=dict(type='str'), + type=dict(type='str', default='anti-affinity'), + zone=dict(type='str'), + tags=dict(type='dict'), + state=dict(default='present', choices=['absent', 'present']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + required_if=(('state', 'present', ('name',),),), + supports_check_mode=True, + ) + + cloudscale_server_group = AnsibleCloudscaleBase( + module, + resource_name='server-groups', + resource_create_param_keys=[ + 'name', + 'type', + 'zone', + 'tags', + ], + resource_update_param_keys=[ + 'name', + 'tags', + ], + ) + cloudscale_server_group.query_constraint_keys = [ + 'zone', + ] + + if module.params['state'] == 'absent': + result = cloudscale_server_group.absent() + else: + result = cloudscale_server_group.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py new file mode 100644 index 00000000..b5e50306 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/subnet.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2020, René Moser <rene.moser@cloudscale.ch> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: subnet +short_description: Manages subnets on the cloudscale.ch IaaS service +description: + - Create, update and remove subnets. 
+author: + - René Moser (@resmo) +version_added: "1.3.0" +options: + uuid: + description: + - UUID of the subnet. + type: str + cidr: + description: + - The cidr of the subnet. + - Required if I(state=present). + type: str + network: + description: + - The name of the network the subnet is related to. + - Required if I(state=present). + type: dict + suboptions: + uuid: + description: + - The uuid of the network. + type: str + name: + description: + - The uuid of the network. + type: str + zone: + description: + - The zone the network allocated in. + type: str + gateway_address: + description: + - The gateway address of the subnet. If not set, no gateway is used. + - Cannot be within the DHCP range, which is the lowest .101-.254 in the subnet. + type: str + dns_servers: + description: + - A list of DNS resolver IP addresses, that act as DNS servers. + - If not set, the cloudscale.ch default resolvers are used. + type: list + elements: str + reset: + description: + - Resets I(gateway_address) and I(dns_servers) to default values by the API. + - "Note: Idempotency is not given." + type: bool + default: false + state: + description: + - State of the subnet. + choices: [ present, absent ] + default: present + type: str + tags: + description: + - Tags associated with the subnet. Set this to C({}) to clear any tags. 
+ type: dict +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +--- +- name: Ensure subnet exists + cloudscale_ch.cloud.subnet: + cidr: 172.16.0.0/24 + network: + uuid: 2db69ba3-1864-4608-853a-0771b6885a3a + api_token: xxxxxx + +- name: Ensure subnet exists + cloudscale_ch.cloud.subnet: + cidr: 192.168.1.0/24 + gateway_address: 192.168.1.1 + dns_servers: + - 192.168.1.10 + - 192.168.1.11 + network: + name: private + zone: lpg1 + api_token: xxxxxx + +- name: Ensure a subnet is absent + cloudscale_ch.cloud.subnet: + cidr: 172.16.0.0/24 + network: + name: private + zone: lpg1 + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +--- +href: + description: API URL to get details about the subnet. + returned: success + type: str + sample: https://api.cloudscale.ch/v1/subnets/33333333-1864-4608-853a-0771b6885a3 +uuid: + description: The unique identifier for the subnet. + returned: success + type: str + sample: 33333333-1864-4608-853a-0771b6885a3 +cidr: + description: The CIDR of the subnet. + returned: success + type: str + sample: 172.16.0.0/24 +network: + description: The network object of the subnet. + returned: success + type: complex + contains: + href: + description: API URL to get details about the network. + returned: success + type: str + sample: https://api.cloudscale.ch/v1/networks/33333333-1864-4608-853a-0771b6885a3 + uuid: + description: The unique identifier for the network. + returned: success + type: str + sample: 33333333-1864-4608-853a-0771b6885a3 + name: + description: The name of the network. + returned: success + type: str + sample: my network + zone: + description: The zone the network is allocated in. + returned: success + type: dict + sample: { 'slug': 'rma1' } + version_added: 1.4.0 +gateway_address: + description: The gateway address of the subnet. + returned: success + type: str + sample: "192.168.42.1" +dns_servers: + description: List of DNS resolver IP addresses. 
+ returned: success + type: list + sample: ["9.9.9.9", "149.112.112.112"] +state: + description: State of the subnet. + returned: success + type: str + sample: present +tags: + description: Tags associated with the subnet. + returned: success + type: dict + sample: { 'project': 'my project' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +class AnsibleCloudscaleSubnet(AnsibleCloudscaleBase): + + def __init__(self, module): + super(AnsibleCloudscaleSubnet, self).__init__( + module=module, + resource_name='subnets', + resource_key_name='cidr', + resource_create_param_keys=[ + 'cidr', + 'gateway_address', + 'dns_servers', + 'tags', + ], + resource_update_param_keys=[ + 'gateway_address', + 'dns_servers', + 'tags', + ], + ) + self._network = None + + def query_network(self, uuid=None): + if self._network is not None: + return self._network + + net_param = self._module.params['network'] + net_uuid = uuid or net_param['uuid'] + + if net_uuid is not None: + network = self._get('networks/%s' % net_uuid) + if not network: + self._module.fail_json(msg="Network with 'uuid' not found: %s" % net_uuid) + + elif net_param['name'] is not None: + networks_found = [] + networks = self._get('networks') + for network in networks or []: + # Skip networks in other zones + if net_param['zone'] is not None and network['zone']['slug'] != net_param['zone']: + continue + + if network.get('name') == net_param['name']: + networks_found.append(network) + + if not networks_found: + msg = "Network with 'name' not found: %s" % net_param['name'] + self._module.fail_json(msg=msg) + + elif len(networks_found) == 1: + network = networks_found[0] + + # We might have found more than one network with identical name + else: + msg = ("Multiple networks with 'name' not found: %s." + "Add the 'zone' to distinguish or use 'uuid' argument to specify the network." 
% net_param['name']) + self._module.fail_json(msg=msg) + + else: + self._module.fail_json(msg="Either Network UUID or name is required.") + + # For consistency, take a minimal network stub, but also include zone + self._network = dict() + for k, v in network.items(): + if k in ['name', 'uuid', 'href', 'zone']: + self._network[k] = v + + return self._network + + def create(self, resource): + resource['network'] = self.query_network() + + data = { + 'network': resource['network']['uuid'], + } + return super(AnsibleCloudscaleSubnet, self).create(resource, data) + + def update(self, resource): + # Resets to default values by the API + if self._module.params.get('reset'): + for key in ('dns_servers', 'gateway_address',): + # No need to reset if user set the param anyway. + if self._module.params.get(key) is None: + self._result['changed'] = True + patch_data = { + key: None + } + if not self._module.check_mode: + href = resource.get('href') + if not href: + self._module.fail_json(msg='Unable to update %s, no href found.' 
% key) + self._patch(href, patch_data, filter_none=False) + + if not self._module.check_mode: + resource = self.query() + + return super(AnsibleCloudscaleSubnet, self).update(resource) + + def get_result(self, resource): + if resource and 'network' in resource: + resource['network'] = self.query_network(uuid=resource['network']['uuid']) + return super(AnsibleCloudscaleSubnet, self).get_result(resource) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + uuid=dict(type='str'), + cidr=dict(type='str'), + network=dict( + type='dict', + options=dict( + uuid=dict(type='str'), + name=dict(type='str'), + zone=dict(type='str'), + ), + ), + gateway_address=dict(type='str'), + dns_servers=dict(type='list', elements='str', default=None), + tags=dict(type='dict'), + reset=dict(type='bool', default=False), + state=dict(default='present', choices=['absent', 'present']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('cidr', 'uuid',),), + required_together=(('cidr', 'network',),), + required_if=(('state', 'present', ('cidr', 'network',),),), + supports_check_mode=True, + ) + + cloudscale_subnet = AnsibleCloudscaleSubnet(module) + + if module.params['state'] == 'absent': + result = cloudscale_subnet.absent() + else: + result = cloudscale_subnet.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py b/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py new file mode 100644 index 00000000..ecc6cfcc --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/plugins/modules/volume.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> +# Copyright (c) 2019, René Moser <mail@renemoser.net> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: volume +short_description: Manages volumes on the cloudscale.ch IaaS service. +description: + - Create, attach/detach, update and delete volumes on the cloudscale.ch IaaS service. +notes: + - To create a new volume at least the I(name) and I(size_gb) options + are required. + - A volume can be created and attached to a server in the same task. +author: + - Gaudenz Steinlin (@gaudenz) + - René Moser (@resmo) + - Denis Krienbühl (@href) +version_added: "1.0.0" +options: + state: + description: + - State of the volume. + default: present + choices: [ present, absent ] + type: str + name: + description: + - Name of the volume. Either name or UUID must be present to change an + existing volume. + type: str + uuid: + description: + - UUID of the volume. Either name or UUID must be present to change an + existing volume. + type: str + size_gb: + description: + - Size of the volume in GB. + type: int + type: + description: + - Type of the volume. Cannot be changed after creating the volume. + Defaults to C(ssd) on volume creation. + choices: [ ssd, bulk ] + type: str + zone: + description: + - Zone in which the volume resides (e.g. C(lpg1) or C(rma1)). Cannot be + changed after creating the volume. Defaults to the project default zone. + type: str + servers: + description: + - UUIDs of the servers this volume is attached to. Set this to C([]) to + detach the volume. Currently a volume can only be attached to a + single server. + - The aliases C(server_uuids) and C(server_uuid) are deprecated and will + be removed in version 3.0.0 of this collection. + aliases: [ server_uuids, server_uuid ] + type: list + elements: str + tags: + description: + - Tags associated with the volume. Set this to C({}) to clear any tags. 
+ type: dict +extends_documentation_fragment: cloudscale_ch.cloud.api_parameters +''' + +EXAMPLES = ''' +# Create a new SSD volume +- name: Create an SSD volume + cloudscale_ch.cloud.volume: + name: my_ssd_volume + zone: 'lpg1' + size_gb: 50 + api_token: xxxxxx + register: my_ssd_volume + +# Attach an existing volume to a server +- name: Attach volume to server + cloudscale_ch.cloud.volume: + uuid: "{{ my_ssd_volume.uuid }}" + servers: + - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Create and attach a volume to a server +- name: Create and attach volume to server + cloudscale_ch.cloud.volume: + name: my_ssd_volume + zone: 'lpg1' + size_gb: 50 + servers: + - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Detach volume from server +- name: Detach volume from server + cloudscale_ch.cloud.volume: + uuid: "{{ my_ssd_volume.uuid }}" + servers: [] + api_token: xxxxxx + +# Delete a volume +- name: Delete volume + cloudscale_ch.cloud.volume: + name: my_ssd_volume + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +href: + description: The API URL to get details about this volume. + returned: state == present + type: str + sample: https://api.cloudscale.ch/v1/volumes/2db69ba3-1864-4608-853a-0771b6885a3a +uuid: + description: The unique identifier for this volume. + returned: state == present + type: str + sample: 2db69ba3-1864-4608-853a-0771b6885a3a +name: + description: The display name of the volume. + returned: state == present + type: str + sample: my_ssd_volume +size_gb: + description: The size of the volume in GB. + returned: state == present + type: str + sample: 50 +type: + description: The type of the volume. + returned: state == present + type: str + sample: bulk +zone: + description: The zone of the volume. + returned: state == present + type: dict + sample: {'slug': 'lpg1'} +server_uuids: + description: The UUIDs of the servers this volume is attached to. 
This return + value is deprecated and will disappear in the future when the field is + removed from the API. + returned: state == present + type: list + sample: ['47cec963-fcd2-482f-bdb6-24461b2d47b1'] +servers: + description: The list of servers this volume is attached to. + returned: state == present + type: list + sample: [ + { + "href": "https://api.cloudscale.ch/v1/servers/47cec963-fcd2-482f-bdb6-24461b2d47b1", + "name": "my_server", + "uuid": "47cec963-fcd2-482f-bdb6-24461b2d47b1" + } + ] +state: + description: The current status of the volume. + returned: success + type: str + sample: present +tags: + description: Tags associated with the volume. + returned: state == present + type: dict + sample: { 'project': 'my project' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.api import ( + AnsibleCloudscaleBase, + cloudscale_argument_spec, +) + + +class AnsibleCloudscaleVolume(AnsibleCloudscaleBase): + + def create(self, resource): + # Fail when missing params for creation + self._module.fail_on_missing_params(['name', 'size_gb']) + return super(AnsibleCloudscaleVolume, self).create(resource) + + def find_difference(self, key, resource, param): + is_different = False + + if key != 'servers': + return super(AnsibleCloudscaleVolume, self).find_difference(key, resource, param) + + server_has = resource[key] + server_wanted = param + if len(server_wanted) != len(server_has): + is_different = True + else: + for has in server_has: + if has["uuid"] not in server_wanted: + is_different = True + + return is_different + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + state=dict(type='str', default='present', choices=('present', 'absent')), + name=dict(type='str'), + uuid=dict(type='str'), + zone=dict(type='str'), + size_gb=dict(type='int'), + type=dict(type='str', choices=('ssd', 'bulk')), + servers=dict(type='list', elements='str', aliases=['server_uuids', 'server_uuid']), + 
tags=dict(type='dict'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + supports_check_mode=True, + ) + + # TODO remove in version 3.0.0 + if module.params.get('server_uuid') or module.params.get('server_uuids'): + module.deprecate('The aliases "server_uuid" and "server_uuids" have ' + 'been deprecated and will be removed, use "servers" ' + 'instead.', + version='3.0.0', collection_name='cloudscale_ch.cloud') + + cloudscale_volume = AnsibleCloudscaleVolume( + module, + resource_name='volumes', + resource_create_param_keys=[ + 'name', + 'type', + 'zone', + 'size_gb', + 'servers', + 'tags', + ], + resource_update_param_keys=[ + 'name', + 'size_gb', + 'servers', + 'tags', + ], + ) + + if module.params['state'] == 'absent': + result = cloudscale_volume.absent() + else: + result = cloudscale_volume.present() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template b/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template new file mode 100644 index 00000000..5fa5d5fc --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/cloud-config-cloudscale.ini.template @@ -0,0 +1,2 @@ +[default] +cloudscale_api_token = @API_TOKEN diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/aliases @@ -0,0 +1 @@ +hidden diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml new file mode 100644 index 00000000..81387ff9 --- /dev/null +++ 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/defaults/main.yml @@ -0,0 +1,20 @@ +--- +# The image to use for test servers +cloudscale_test_image: 'debian-10' + +# Alternate test image to use if a different image is required +cloudscale_alt_test_image: 'ubuntu-20.04' + +# The flavor to use for test servers +cloudscale_test_flavor: 'flex-4-2' + +# SSH key to use for test servers +cloudscale_test_ssh_key: | + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + +# The zone to use to test servers +cloudscale_test_zone: 'lpg1' +cloudscale_test_alt_zone: 'rma1' + +# The region to use to request floating IPs +cloudscale_test_region: 'lpg' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml new file mode 100644 index 00000000..ff2132c5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_custom_images.yml @@ -0,0 +1,24 @@ +--- +- name: List all custom images + uri: + url: 'https://api.cloudscale.ch/v1/custom-images' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: image_list + until: image_list is not failed + retries: 5 + delay: 3 + +- name: Remove all images created by this test run + cloudscale_ch.cloud.custom_image: + uuid: '{{ item.uuid }}' + state: 'absent' + when: cloudscale_resource_prefix in item.name + with_items: '{{ image_list.json }}' + register: res + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' + until: res is 
not failed + retries: 5 + delay: 3 diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml new file mode 100644 index 00000000..66360859 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_floating_ips.yml @@ -0,0 +1,19 @@ +--- +- name: List all floating IPs + uri: + url: 'https://api.cloudscale.ch/v1/floating-ips' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: floating_ip_list + +- name: Remove all floating IPs created by this test run + cloudscale_ch.cloud.floating_ip: + # TODO: fix + # ip: '{{ item.network | ipaddr("address") }}' + ip: '{{ item.network.split("/")[0] }}' + state: 'absent' + when: "cloudscale_resource_prefix in (item.reverse_ptr | string ) or ('ansible_name' in item.tags and cloudscale_resource_prefix in item.tags['ansible_name'])" + with_items: '{{ floating_ip_list.json }}' + loop_control: + label: '{{ item.reverse_ptr }} ({{ item.network }})' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml new file mode 100644 index 00000000..e02c83b1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_networks.yml @@ -0,0 +1,17 @@ +--- +- name: List all networks + uri: + url: 'https://api.cloudscale.ch/v1/networks' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: network_list + +- name: Remove all networks created by this test run + cloudscale_ch.cloud.network: + uuid: '{{ item.uuid }}' + state: absent + when: cloudscale_resource_prefix in item.name + with_items: '{{ network_list.json }}' + loop_control: + label: '{{ item.name }} ({{ 
item.uuid }})' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml new file mode 100644 index 00000000..2ddb1743 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_objects_users.yml @@ -0,0 +1,17 @@ +--- +- name: List all objects users + uri: + url: 'https://api.cloudscale.ch/v1/objects-users' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: objects_user_list + +- name: Remove all objects users created by this test run + cloudscale_ch.cloud.objects_user: + id: '{{ item.id }}' + state: absent + when: cloudscale_resource_prefix in item.display_name + with_items: '{{ objects_user_list.json }}' + loop_control: + label: '{{ item.display_name }} ({{ item.id }})' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml new file mode 100644 index 00000000..cc85b137 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_server_groups.yml @@ -0,0 +1,17 @@ +--- +- name: List all server groups + uri: + url: 'https://api.cloudscale.ch/v1/server-groups' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: server_group_list + +- name: Remove all server groups created by this test run + cloudscale_ch.cloud.server_group: + uuid: '{{ item.uuid }}' + state: absent + when: cloudscale_resource_prefix in item.name + with_items: '{{ server_group_list.json }}' + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml new file mode 100644 index 00000000..d8c3a0fa --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_servers.yml @@ -0,0 +1,24 @@ +--- +- name: List all servers + uri: + url: 'https://api.cloudscale.ch/v1/servers' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: server_list + until: server_list is not failed + retries: 5 + delay: 3 + +- name: Remove all servers created by this test run + cloudscale_ch.cloud.server: + uuid: '{{ item.uuid }}' + state: 'absent' + when: cloudscale_resource_prefix in item.name + with_items: '{{ server_list.json }}' + register: res + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' + until: res is not failed + retries: 5 + delay: 3 diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml new file mode 100644 index 00000000..7ce17ba3 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/cleanup_volumes.yml @@ -0,0 +1,17 @@ +--- +- name: List all volumes + uri: + url: 'https://api.cloudscale.ch/v1/volumes' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: volume_list + +- name: Remove all volumes created by this test run + cloudscale_ch.cloud.volume: + uuid: '{{ item.uuid }}' + state: 'absent' + when: cloudscale_resource_prefix in item.name + with_items: '{{ volume_list.json }}' + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml new file mode 100644 index 00000000..fa0be6eb --- /dev/null +++ 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/common/tasks/main.yml @@ -0,0 +1,6 @@ +--- +# Password to use for test server +# This has to be set as a fact, otherwise a new password will be generated +# on every variable access. +- set_fact: + cloudscale_test_password: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}" diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml new file mode 100644 index 00000000..630ca4f5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +image_url: https://at-images.objects.lpg.cloudscale.ch/alpine diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml new file mode 100644 index 00000000..a2f4ff52 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/main.yml @@ -0,0 +1,7 @@ +--- 
+- block: + - import_tasks: tests.yml + always: + - import_role: + name: common + tasks_from: cleanup_custom_images diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml new file mode 100644 index 00000000..c82de757 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/custom_image/tasks/tests.yml @@ -0,0 +1,415 @@ +--- +- name: Delete a non existent custom image by uuid + cloudscale_ch.cloud.custom_image: + uuid: '415caea5-da7c-4aaa-aaaa-ececd38fb8ea' + state: absent + register: delete +- name: Verify Delete a non existent custom image by uuid + assert: + that: + - delete is not changed + +- name: Fail delete a non existent custom image by name + cloudscale_ch.cloud.custom_image: + name: this-image-is-non-existent + state: absent + register: delete +- name: Verify Fail delete a non existend custom image by name + assert: + that: + - delete is not changed + +- name: Fail import a custom image without url + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image" + state: present + slug: custom-ansible-image + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: failed_import + ignore_errors: true +- name: Verify Fail import a custom image without url + assert: + that: + - failed_import is failed + - failed_import.msg.startswith('Cannot import a new image without url.') + +- name: Import a custom image and wait for import + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image" + state: present + slug: custom-ansible-image + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: image1 + retries: 15 + delay: 5 + until: image1.import_status == 'success' + failed_when: image1.import_status == 
'failed' +- name: Verify import a custom image and wait for import + assert: + that: + - image1.import_status == 'success' + - image1.name == "{{ cloudscale_resource_prefix }}-test-image" + +- name: Import a custom image (idempotency) + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image" + state: present + slug: custom-ansible-image + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: image +- name: Verify import a custom image (idempotency) + assert: + that: + - image is not changed + - image.name == "{{ cloudscale_resource_prefix }}-test-image" + - image.uuid == image1.uuid + +- name: Import a custom image with bad url + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image2" + state: present + slug: custom-ansible-image + url: "{{ image_url }}-non-existent" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: failed_import + ignore_errors: true + retries: 15 + delay: 5 + until: failed_import.import_status == 'failed' + failed_when: failed_import.import_status == 'failed' +- name: Verify Fail import a custom image with bad url + assert: + that: + - failed_import is failed + - failed_import.error_message.startswith('Expected HTTP 200, got HTTP 40') + +- name: Import a custom image with bad url (idempotency) + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image2" + state: present + slug: custom-ansible-image + url: "{{ image_url }}-non-existent" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: failed_import_idempotency + ignore_errors: true + retries: 15 + delay: 5 + until: failed_import_idempotency.import_status == 'failed' + failed_when: failed_import_idempotency.import_status == 'failed' +- name: Verify Fail import a custom image with bad url (idempotency) + assert: + 
that: + - failed_import_idempotency is failed + - failed_import_idempotency.error_message.startswith('Expected HTTP 200, got HTTP 40') + - failed_import.uuid == failed_import_idempotency.uuid + +# This task should not loop like the ones above because otherwise each +# invocation in the loop will create a new import because of the "force_retry" +# parameter. It just check that a new import is created everything else is +# already covered above. +- name: Import a custom image with bad url (retry) + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image2" + state: present + slug: custom-ansible-image + url: "{{ image_url }}-non-existent" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + force_retry: true + register: failed_import_retry +- name: Verify Fail import a custom image with bad url (retry) + assert: + that: + - failed_import.uuid != failed_import_retry.uuid + +- name: Import a custom image and a failed import with that name exists + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image2" + state: present + slug: custom-ansible-image + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + register: image2 +- name: Verify import a custom image + assert: + that: + - image2 is changed + - image2.name == "{{ cloudscale_resource_prefix }}-test-image2" + - image2.uuid != image1.uuid + +- name: Wait for import + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + retries: 15 + delay: 5 + register: import_status + until: import_status.import_status == 'success' + failed_when: import_status.import_status == 'failed' +- name: Verify Wait for import + assert: + that: + - import_status is not changed + - import_status.name == "{{ cloudscale_resource_prefix }}-test-image2" + +- name: Get image by name + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix 
}}-test-image2" + register: image_by_name +- name: Verify get image by name + assert: + that: + - image_by_name is not changed + - image_by_name.uuid == image2.uuid + +- name: Change the name of an image + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image-with-a-new-name" + uuid: "{{ image2.uuid }}" + register: change_name +- name: Verify Change the name of an image + assert: + that: + - change_name.name == "{{ cloudscale_resource_prefix }}-test-image-with-a-new-name" + - change_name is changed + +- name: Update slug of a custom image + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + slug: ansible-image-slug + register: image +- name: Verify update slug of a custom image + assert: + that: + - image is changed + - image.slug == 'ansible-image-slug' + +- name: Get custom image with updated slug + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + register: image +- name: Verify update slug of a custom image + assert: + that: + - image.slug == "ansible-image-slug" + +- name: Update tags of a custom image + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + tags: + project: luna + register: image +- name: Verify update tags of a custom image + assert: + that: + - image is changed + - image.tags == "project: luna" + +- name: Update user_data_handling of a custom image + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + user_data_handling: 'extend-cloud-config' + register: image +- name: Verify update user_data_handling of a custom image + assert: + that: + - image is changed + - image.user_data_handling == 'extend-cloud-config' + +- name: Get custom image with updated user_data_handling + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + register: image +- name: Verify update user_data_handling of a custom image + assert: + that: + - image.user_data_handling == "extend-cloud-config" + +- name: Update slug, tags and user_data_handling of a custom image + 
cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.uuid }}" + slug: yet-another-slug + tags: + project: jupiter + user_data_handling: 'pass-through' + register: image +- name: Verify update slug, tags and user_data_handling of a custom image + assert: + that: + - image is changed + - image.slug == "yet-another-slug" + - image.tags == "project: jupiter" + - image.user_data_handling == "pass-through" + +- name: List all custom images, there should be two + uri: + url: 'https://api.cloudscale.ch/v1/custom-images' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: image_list +- name: Verify that two custom images are created by this test run + assert: + that: + - image_list.json | selectattr("name","search", "{{ cloudscale_resource_prefix }}" ) | list | length == 2 + +- name: Delete image by uuid + cloudscale_ch.cloud.custom_image: + uuid: "{{ image1.uuid }}" + state: absent + register: image1 +- name: Verify delete image by uuid + assert: + that: + - image1 is changed + - image1.state == 'absent' + +- name: Delete image by name + cloudscale_ch.cloud.custom_image: + name: "{{ change_name.name }}" + state: absent + register: image2 +- name: Verify delete image by name + assert: + that: + - image2 is changed + - image2.state == 'absent' +- name: Check if all images got deleted + uri: + url: 'https://api.cloudscale.ch/v1/custom-images' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: image_list +- name: Verify that the two custom images of this test run are deleted + assert: + that: + - image_list.json | selectattr("name","search", "{{ cloudscale_resource_prefix }}" ) | list | length == 0 + +- name: Import a custom image + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image" + state: present + slug: custom-ansible-image + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + tags: + project: mars + source_format: raw + 
register: image1 + +- name: Import a custom for the second time (uri module) + uri: + url: 'https://api.cloudscale.ch/v1/custom-images/import' + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + url: "{{ image_url }}" + name: "{{ cloudscale_resource_prefix }}-test-image" + slug: 'custom-ansible-image' + zones: + - 'rma1' + user_data_handling: 'pass-through' + tags: + project: 'mars' + source_format: 'raw' + body_format: json + status_code: 201 + register: image2 + +- name: Wait for import of first image + cloudscale_ch.cloud.custom_image: + uuid: "{{ image1.uuid }}" + retries: 15 + delay: 5 + register: import_status + until: import_status.import_status == 'success' + failed_when: import_status.import_status == 'failed' + +- name: Wait for import of second image + cloudscale_ch.cloud.custom_image: + uuid: "{{ image2.json.uuid }}" + retries: 15 + delay: 5 + register: import_status + until: import_status.import_status == 'success' + failed_when: import_status.import_status == 'failed' + +- name: Fail get image by name when two exist + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image" + register: failed_image_by_name + ignore_errors: true +- name: Verify Fail get image by name when two exist + assert: + that: + - failed_image_by_name is failed + - failed_image_by_name.msg.startswith("More than one custom-images resource with 'name' exists") + +- name: Import a custom image with firmware type uefi and wait for import + cloudscale_ch.cloud.custom_image: + name: "{{ cloudscale_resource_prefix }}-test-image-uefi" + state: present + slug: custom-ansible-image-uefi + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + firmware_type: 'uefi' + source_format: raw + register: image1_uefi + retries: 15 + delay: 5 + until: image1_uefi.import_status == 'success' + failed_when: image1_uefi.import_status == 'failed' +- name: Verify import a custom image and wait for import + assert: + 
that: + - image1_uefi.import_status == 'success' + - image1_uefi.name == "{{ cloudscale_resource_prefix }}-test-image-uefi" + +- name: Fail changing the firmware type of an already imported image from uefi to bios + cloudscale_ch.cloud.custom_image: + name: "{{ image1_uefi.name }}" + state: present + slug: "{{ image1_uefi.slug }}" + url: "{{ image_url }}" + zones: lpg1 + user_data_handling: 'pass-through' + firmware_type: 'bios' + source_format: raw + register: fail_firmware_change + ignore_errors: true +- name: Verify firmware type change failed + assert: + that: + - fail_firmware_change is failed diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml new file mode 100644 index 00000000..d58d19b6 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/floating_ip.yml @@ -0,0 +1,158 @@ +- name: Request regional floating IP in check mode + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-floating-ip' + server: '{{ test01.uuid }}' + ip_version: '{{ 
item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + region: '{{ cloudscale_test_region }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + check_mode: true + register: floating_ip +- name: Verify request floating IP in check mode + assert: + that: + - floating_ip is changed + - floating_ip.state == 'absent' + +- name: Request regional floating IP + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-floating-ip' + server: '{{ test01.uuid }}' + ip_version: '{{ item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + region: '{{ cloudscale_test_region }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: floating_ip +- name: Verify request regional floating IP + assert: + that: + - floating_ip is changed + - floating_ip.region.slug == cloudscale_test_region + # - (item.ip_version == 4 and floating_ip.ip | ipv4) or (item.ip_version == 6 and floating_ip.ip | ipv6) + - floating_ip.server == test01.uuid + - floating_ip.tags.project == 'ansible-test' + - floating_ip.tags.stage == 'production' + - floating_ip.tags.sla == '24-7' + +- name: Request regional floating IP idempotence + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-floating-ip' + server: '{{ test01.uuid }}' + ip_version: '{{ item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + region: '{{ cloudscale_test_region }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: floating_ip_idempotence +- name: Verify request regional floating IP idempotence + assert: + that: + - floating_ip_idempotence is not changed + - floating_ip_idempotence.server == test01.uuid + - floating_ip.region.slug == cloudscale_test_region + - floating_ip.tags.project == 
'ansible-test' + - floating_ip.tags.stage == 'production' + - floating_ip.tags.sla == '24-7' + +- name: Request regional floating IP different IP version in check mode + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-floating-ip' + ip_version: '{{ 6 if item.ip_version == 4 else 4 }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + region: '{{ cloudscale_test_region }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: floating_ip_version_differ + check_mode: true +- name: Verify Request regional floating IP different IP version in check mode + assert: + that: + - floating_ip_version_differ is changed + - not floating_ip_version_differ.network + +- name: Request global floating IP + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-global-floating-ip' + ip_version: '{{ item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + type: 'global' + register: global_floating_ip +- name: Verify global floating IP + assert: + that: + - global_floating_ip is changed + - global_floating_ip.region == None + - global_floating_ip.type == 'global' + # - (item.ip_version == 4 and global_floating_ip.ip | ipv4) or (item.ip_version == 6 and global_floating_ip.ip | ipv6) + - not global_floating_ip.server + +- name: Remove global floating IP + cloudscale_ch.cloud.floating_ip: + ip: '{{ global_floating_ip.ip }}' + state: 'absent' + register: global_floating_ip +- name: Verify release of global floating IP + assert: + that: + - global_floating_ip is changed + - global_floating_ip.state == 'absent' + +- name: Move floating IP to second server + cloudscale_ch.cloud.floating_ip: + server: '{{ test02.uuid }}' + ip: '{{ floating_ip.ip }}' + register: move_ip +- name: Verify move floating IPv4 to second server + assert: + that: + - move_ip is changed + - move_ip.server 
== test02.uuid + +- name: Remove floating IP in check mode + cloudscale_ch.cloud.floating_ip: + ip: '{{ floating_ip.ip }}' + state: 'absent' + register: release_ip + check_mode: true +- name: Verify Remove floating IP in check mode + assert: + that: + - release_ip is changed + - release_ip.state == 'present' + +- name: Remove floating IP + cloudscale_ch.cloud.floating_ip: + ip: '{{ floating_ip.ip }}' + state: 'absent' + register: release_ip +- name: Verify Remove floating IP + assert: + that: + - release_ip is changed + - release_ip.state == 'absent' + +- name: Remove floating IP idempotence + cloudscale_ch.cloud.floating_ip: + ip: '{{ floating_ip.ip }}' + state: 'absent' + register: release_ip +- name: Verify Remove floating IP idempotence + assert: + that: + - release_ip is not changed + - release_ip.state == 'absent' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml new file mode 100644 index 00000000..a7046ae9 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: Cloudscale floating IP tests + block: + - name: Create a server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test01' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + register: test01 + + - name: Create a second server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test02' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + register: test02 + + - include_tasks: floating_ip.yml + loop: + - { 'ip_version': 4, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-4.example.com' } + - { 
'ip_version': 6, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-6.example.com' } + - { 'ip_version': 6, 'prefix_length': 56 } + + - import_tasks: unassigned.yml + + always: + - import_role: + name: common + tasks_from: cleanup_servers + - import_role: + name: common + tasks_from: cleanup_floating_ips diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml new file mode 100644 index 00000000..4ee4ed08 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/floating_ip/tasks/unassigned.yml @@ -0,0 +1,27 @@ +--- +- name: Assign Floating IP to server test01 + cloudscale_ch.cloud.floating_ip: + name: '{{ cloudscale_resource_prefix }}-unassigned' + ip_version: 6 + server: '{{ test01.uuid }}' + reverse_ptr: '{{ cloudscale_resource_prefix }}-unassigned.example.com' + region: '{{ cloudscale_test_region }}' + register: floating_ip + +# The only way to have an unassigned floating IP is to delete the server +# where the floating IP is currently assigned. 
+- name: Delete server test01 + cloudscale_ch.cloud.server: + uuid: '{{ test01.uuid }}' + state: 'absent' + +- name: Do not fail if floating IP is unassigned + cloudscale_ch.cloud.floating_ip: + ip: '{{ floating_ip.ip }}' + register: floating_ip_not_fail +- name: Verify do not fail if floating IP is unassigned + assert: + that: + - floating_ip_not_fail is successful + - floating_ip_not_fail is not changed + - floating_ip_not_fail.server == None diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml new file mode 100644 index 00000000..57efebd5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/failures.yml @@ -0,0 +1,44 @@ +--- +- name: Fail missing params + cloudscale_ch.cloud.network: + register: net + ignore_errors: True +- name: 'VERIFY: Fail name and UUID' + assert: + that: + - net is failed + +- name: Create two networks with the same name + uri: + url: 'https://api.cloudscale.ch/v1/networks' + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix 
}}-duplicate' + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try access to duplicate name + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-duplicate' + register: net + ignore_errors: True +- name: 'VERIFY: Try access to duplicate name' + assert: + that: + - net is failed + - net.msg.startswith("More than one networks resource with 'name' exists") + +- name: Fail network creation with UUID + cloudscale_ch.cloud.network: + uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + register: net + ignore_errors: True +- name: 'VERIFY: Fail network creation with UUID' + assert: + that: + - net is failed + - net.msg.startswith("state is present but all of the following are missing") diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml new file mode 100644 index 00000000..a89bd140 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- block: + - import_tasks: failures.yml + - import_tasks: tests.yml + always: + - import_role: + name: common + tasks_from: cleanup_networks diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml new file mode 100644 index 00000000..926a452d --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/network/tasks/tests.yml @@ -0,0 +1,159 @@ +--- +- name: Create network in check mode + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: net + check_mode: yes +- name: 'VERIFY: Create network in check mode' + assert: + that: + - net is changed + - net.name == '{{ cloudscale_resource_prefix }}-net' + - not 
net.uuid + +- name: Create network + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: net +- name: 'VERIFY: Create network' + assert: + that: + - net is changed + - net.mtu == 9000 + - net.name == '{{ cloudscale_resource_prefix }}-net' + - net.zone.slug == cloudscale_test_zone + - net.uuid + - net.tags.project == 'ansible-test' + - net.tags.stage == 'production' + - net.tags.sla == '24-7' + +- name: Remember uuid + set_fact: + network_uuid: '{{ net.uuid }}' + +- name: Create network idempotence + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: net +- name: 'VERIFY: Create network idempotence' + assert: + that: + - net is not changed + - net.name == '{{ cloudscale_resource_prefix }}-net' + - net.zone.slug == cloudscale_test_zone + - net.uuid == network_uuid + - net.tags.project == 'ansible-test' + - net.tags.stage == 'production' + - net.tags.sla == '24-7' + +- name: Update network in check mode + cloudscale_ch.cloud.network: + uuid: '{{ network_uuid }}' + name: '{{ cloudscale_resource_prefix }}-net2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: net + check_mode: yes +- name: 'VERIFY: Update network in check mode' + assert: + that: + - net is changed + - net.name == '{{ cloudscale_resource_prefix }}-net' + - net.uuid == network_uuid + - net.zone.slug == cloudscale_test_zone + - net.tags.project == 'ansible-test' + - net.tags.stage == 'production' + - net.tags.sla == '24-7' + +- name: Update network + cloudscale_ch.cloud.network: + uuid: '{{ network_uuid }}' + name: '{{ cloudscale_resource_prefix }}-net2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: net +- name: 'VERIFY: Update network' + assert: + that: + - net is changed + - net.name == 
'{{ cloudscale_resource_prefix }}-net2' + - net.uuid == network_uuid + - net.zone.slug == cloudscale_test_zone + - net.tags.project == 'ansible-test' + - net.tags.stage == 'staging' + - net.tags.sla == '8-5' + +- name: Update network idempotence + cloudscale_ch.cloud.network: + uuid: '{{ network_uuid }}' + name: '{{ cloudscale_resource_prefix }}-net2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: net +- name: 'VERIFY: Update network idempotence' + assert: + that: + - net is not changed + - net.name == '{{ cloudscale_resource_prefix }}-net2' + - net.uuid == network_uuid + - net.zone.slug == cloudscale_test_zone + - net.tags.project == 'ansible-test' + - net.tags.stage == 'staging' + - net.tags.sla == '8-5' + +- name: Delete network in check mode + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net2' + state: absent + register: net + check_mode: yes +- name: 'VERIFY: Delete network in check mode' + assert: + that: + - net is changed + - net.name == '{{ cloudscale_resource_prefix }}-net2' + - net.uuid == network_uuid + +- name: Delete network + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net2' + state: absent + register: net +- name: 'VERIFY: Delete network' + assert: + that: + - net is changed + - net.name == '{{ cloudscale_resource_prefix }}-net2' + - net.uuid == network_uuid + +- name: Delete network idempotence + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net2' + state: absent + register: net +- name: 'VERIFY: Delete network idempotence' + assert: + that: + - net is not changed + - net.name == '{{ cloudscale_resource_prefix }}-net2' + - not net.uuid diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml new file mode 100644 index 00000000..42d22d0d --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/failures.yml @@ -0,0 +1,44 @@ +--- +- name: Fail missing params + cloudscale_ch.cloud.objects_user: + register: obj + ignore_errors: True +- name: 'VERIFY: Fail name and UUID' + assert: + that: + - obj is failed + +- name: Create two objects user with the same display_name + uri: + url: 'https://api.cloudscale.ch/v1/objects-users' + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + display_name: '{{ cloudscale_resource_prefix }}-duplicate' + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try access to duplicate display_name + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-duplicate' + register: obj + ignore_errors: True +- name: 'VERIFY: Try access to duplicate name' + assert: + that: + - obj is failed + - obj.msg.startswith("More than one objects-users resource with 'display_name' exists") + +- name: Fail creation with ID + cloudscale_ch.cloud.objects_user: + id: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + register: obj + ignore_errors: True +- name: 'VERIFY: 
Fail server group creation with ID' + assert: + that: + - obj is failed + - obj.msg.startswith("state is present but all of the following are missing") diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml new file mode 100644 index 00000000..69171378 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- block: + - import_tasks: failures.yml + - import_tasks: tests.yml + always: + - import_role: + name: common + tasks_from: cleanup_objects_users diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml new file mode 100644 index 00000000..b77921aa --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/objects_user/tasks/tests.yml @@ -0,0 +1,151 @@ +--- +- name: Create objects user in check mode + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: obj + check_mode: yes +- name: 'VERIFY: Create objects user in check mode' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj' + - not obj.id + +- name: Create objects user + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: obj +- name: 'VERIFY: Create objects user' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj' + - obj.id + - obj.tags.project == 'ansible-test' + - obj.tags.stage == 'production' + - obj.tags.sla == '24-7' + +- name: Remember uuid + set_fact: + objects_user_id: '{{ obj.id }}' + 
+- name: Create objects user idempotence + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: obj +- name: 'VERIFY: Create objects user idempotence' + assert: + that: + - obj is not changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj' + - obj.id == objects_user_id + - obj.tags.project == 'ansible-test' + - obj.tags.stage == 'production' + - obj.tags.sla == '24-7' + +- name: Update objects user in check mode + cloudscale_ch.cloud.objects_user: + id: '{{ objects_user_id }}' + display_name: '{{ cloudscale_resource_prefix }}-obj2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: obj + check_mode: yes +- name: 'VERIFY: Update objects user in check mode' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj' + - obj.id == objects_user_id + - obj.tags.project == 'ansible-test' + - obj.tags.stage == 'production' + - obj.tags.sla == '24-7' + +- name: Update objects user + cloudscale_ch.cloud.objects_user: + id: '{{ objects_user_id }}' + display_name: '{{ cloudscale_resource_prefix }}-obj2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: obj +- name: 'VERIFY: Update objects user' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2' + - obj.id == objects_user_id + - obj.tags.project == 'ansible-test' + - obj.tags.stage == 'staging' + - obj.tags.sla == '8-5' + +- name: Update objects user idempotence + cloudscale_ch.cloud.objects_user: + id: '{{ objects_user_id }}' + display_name: '{{ cloudscale_resource_prefix }}-obj2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: obj +- name: 'VERIFY: Update objects user idempotence' + assert: + that: + - obj is not changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2' + - obj.id == objects_user_id + - obj.tags.project == 
'ansible-test' + - obj.tags.stage == 'staging' + - obj.tags.sla == '8-5' + +- name: Delete objects user in check mode + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj2' + state: absent + register: obj + check_mode: yes +- name: 'VERIFY: Delete objects user in check mode' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2' + - obj.id == objects_user_id + +- name: Delete objects user + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj2' + state: absent + register: obj +- name: 'VERIFY: Delete objects user' + assert: + that: + - obj is changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2' + - obj.id == objects_user_id + +- name: Delete objects user idempotence + cloudscale_ch.cloud.objects_user: + display_name: '{{ cloudscale_resource_prefix }}-obj2' + state: absent + register: obj +- name: 'VERIFY: Delete objects user idempotence' + assert: + that: + - obj is not changed + - obj.display_name == '{{ cloudscale_resource_prefix }}-obj2' + - not obj.id diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml new file mode 100644 index 00000000..a1ba98be --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cloudscale_test_flavor_2: flex-4-4 diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml new file mode 100644 index 00000000..8d9ebee0 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/failures.yml @@ -0,0 +1,53 @@ +--- +- name: Fail missing params + cloudscale_ch.cloud.server: + register: srv + ignore_errors: True +- name: 'VERIFY: Fail name and UUID' + assert: + that: + - srv is failed + +- name: Fail unexisting server group + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-group' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + server_groups: '{{ cloudscale_resource_prefix }}-unexist-group' + ignore_errors: True + register: srv +- name: 'VERIFY: Fail unexisting server group' + assert: + that: + - srv is failed + - srv.msg.startswith('Server group name or UUID not found') + +- name: Create two server groups with the same name + uri: + url: https://api.cloudscale.ch/v1/server-groups + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix }}-duplicate' + type: anti-affinity + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try to use server groups with identical name + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-group' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + server_groups: '{{ 
cloudscale_resource_prefix }}-duplicate' + ignore_errors: True + register: srv +- name: 'VERIFY: Fail unexisting server group' + assert: + that: + - srv is failed + - srv.msg.startswith('More than one server group with name exists') diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml new file mode 100644 index 00000000..9f7641bd --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- block: + - import_tasks: failures.yml + - import_tasks: tests.yml + always: + - import_role: + name: common + tasks_from: cleanup_servers + - import_role: + name: common + tasks_from: cleanup_server_groups + - import_role: + name: common + tasks_from: cleanup_networks diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml new file mode 100644 index 00000000..7d9c33bd --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server/tasks/tests.yml @@ -0,0 +1,991 @@ +--- +- name: Setup server groups + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-group-{{ item }}' + type: anti-affinity + zone: '{{ cloudscale_test_zone }}' + with_sequence: count=2 + +- name: Test create a running server in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server + check_mode: yes +- name: Verify create a running server in check mode + assert: + that: + - server is 
changed + - server.state == 'absent' + +- name: Test create a running server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server +- name: Verify create a running server + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.zone.slug == '{{ cloudscale_test_zone }}' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test create a running server idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server +- name: Verify create a running server idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.zone.slug == '{{ cloudscale_test_zone }}' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test update tags in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: 
server + check_mode: yes +- name: Verify update tags in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test update tags + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: server +- name: Verify update tags + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test update tags idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: server +- name: Verify update tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test omit tags idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + register: server +- name: 
Verify update tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test delete tags + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: {} + register: server +- name: Verify delete tags + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - not server.tags + +- name: Test delete tags idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: {} + register: server +- name: Verify delete tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - not server.tags + +- name: Test update flavor of a running server without force in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server + check_mode: yes +- name: Verify update flavor of a running server without force in check mode + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test 
update flavor of a running server without force + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor of a running server without force + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test update flavor of a running server without force idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor of a running server without force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test update flavor and name of a running server without force in check mode + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server + check_mode: yes +- name: Verify update flavor and name of a running server without force in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test update flavor and name of a running server without force + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' 
+ flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor and name of a running server without force + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor and name of a running server without force idempotence + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor and name of a running server without force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor of a running server with force in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: yes + register: server + check_mode: yes +- name: Verify update flavor of a running server with force in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor of a running server with force + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: yes + register: server +- name: 
Verify update flavor of a running server with force + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update a running server with force idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + force: yes + register: server +- name: Verify update flavor of a running server with force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Remember uuid of running server for anti affinity + set_fact: + running_server_uuid: '{{ server.uuid }}' + +- name: Test create server stopped in anti affinity and private network only in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + check_mode: yes + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only in check mode + assert: + that: + - server_stopped is changed + - server_stopped.state == 'absent' + +- name: Test create server stopped in anti affinity and private network only + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' 
+ zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only + assert: + that: + - server_stopped is changed + - server_stopped.state == 'stopped' + - server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test create server stopped in anti affinity and private network only idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only idempotence + assert: + that: + - server_stopped is not changed + - server_stopped.state == 'stopped' + - server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test change server group not changed + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-2' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify Test update server group not changed + assert: + that: 
+ - server_stopped is not changed + - server_stopped.state == 'stopped' + - server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test create server with password in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + check_mode: yes + register: server_password +- name: Verify create server with password in check mode + assert: + that: + - server_password is changed + - server_password.state == 'absent' + # Verify password is not logged + - server_password.diff.after.password != cloudscale_test_password + +- name: Test create server with password + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + register: server_password +- name: Verify create server with password + assert: + that: + - server_password is changed + - server_password.state == 'running' + # Verify password is not logged + - server_password.diff.after.password != cloudscale_test_password + +- name: Test create server with password idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + register: server_password +- name: Verify create server with password idempotence + assert: + that: + - server_password is not changed + - server_password.state == 'running' + +- name: Test create server failure without required parameters + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix 
}}-test-failed' + register: server_failed + ignore_errors: yes +- name: Verify create server failure without required parameters + assert: + that: + - server_failed is failed + - "'Failure while calling the cloudscale.ch API with POST' in server_failed.msg" + - "'This field is required.' in server_failed.fetch_url_info.body" + +- name: Test stop running server in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: stopped + check_mode: yes + register: server +- name: Verify stop running server in check mode + assert: + that: + - server is changed + - server.state == 'running' + +- name: Test stop running server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: stopped + register: server +- name: Verify stop running server + assert: + that: + - server is changed + - server.state == 'stopped' + +- name: Test stop running server idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: 'stopped' + register: server +- name: Verify stop running server idempotence + assert: + that: + - server is not changed + - server.state == 'stopped' + +- name: Test update a stopped server in check mode + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server + check_mode: yes +- name: Verify update a stopped server in check mode + assert: + that: + - server is changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update a stopped server without force + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: 
'{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server +- name: Verify update a stopped server without force + assert: + that: + - server is changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test update a stopped server idempotence + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server +- name: Verify update a stopped server idempotence + assert: + that: + - server is not changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test server running in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server + check_mode: yes +- name: Verify server running in check mode + assert: + that: + - server is changed + - server.state == 'stopped' + +- name: Test server running + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server +- name: Verify server running + assert: + that: + - server is changed + - server.state == 'running' + +- name: Test server running idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server +- name: Verify server running idempotence + assert: + that: + - server is not changed + - server.state == 'running' + +- name: Test running server deletion by name in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: absent + register: server + check_mode: yes +- name: Verify running server deletion by name in check mode + assert: + 
that: + - server is changed + - server.state == 'running' + +- name: Test running server deletion by name + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: absent + register: server +- name: Verify running server deletion by name + assert: + that: + - server is changed + - server.state == 'absent' + +- name: Test running server deletion by name idempotence + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test' + state: absent + register: server +- name: Verify running server deletion by name idempotence + assert: + that: + - server is not changed + - server.state == 'absent' + +- name: Test stopped server deletion by uuid in check mode + cloudscale_ch.cloud.server: + uuid: '{{ server_stopped.uuid }}' + state: absent + register: server_stopped + check_mode: yes +- name: Verify stopped server deletion by uuid in check mode + assert: + that: + - server_stopped is changed + - server_stopped.state == 'stopped' + +- name: Test stopped server deletion by uuid + cloudscale_ch.cloud.server: + uuid: '{{ server_stopped.uuid }}' + state: absent + register: server_stopped +- name: Verify stopped server deletion by uuid + assert: + that: + - server_stopped is changed + - server_stopped.state == 'absent' + +- name: Test stopped server deletion by uuid idempotence + cloudscale_ch.cloud.server: + uuid: '{{ server_stopped.uuid }}' + state: absent + register: server_stopped +- name: Verify stopped server deletion by uuid idempotence + assert: + that: + - server_stopped is not changed + - server_stopped.state == 'absent' + +- name: Create first private network + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-test-interface-network' + state: present + zone: '{{ cloudscale_test_zone }}' + register: test_interface_network + +- name: Create second private network + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-test-interface-network1' + state: present + zone: '{{ 
cloudscale_test_zone }}' + register: test_interface_network1 + +- name: Create server in specific private network in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + state: running + interfaces: + - network: '{{ test_interface_network.uuid }}' + check_mode: yes + register: server +- name: Verify Create server in specific private network in check mode + assert: + that: + - server.changed == True + - server.state == 'absent' + +- name: Create server in specific private network + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + state: running + interfaces: + - network: '{{ test_interface_network.uuid }}' + register: server +- name: Verify Create server in specific private network + assert: + that: + - server.changed == True + - server.state == 'running' + - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network' + +- name: Create server in specific private network idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + state: running + interfaces: + - network: '{{ test_interface_network.uuid }}' + register: server +- name: Verify Create server in specific private network idempotency + assert: + that: + - server.changed == False + - server.state == 'running' + - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network' + +- name: Remove private IP and add public network from 
server in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: '{{ test_interface_network.uuid }}' + addresses: [] + check_mode: yes + register: server +- name: Verify Remove private IP and add public network from server in check mode + assert: + that: + - server.changed == True + - server.interfaces.0.network.name == '{{ cloudscale_resource_prefix }}-test-interface-network' + +- name: Remove private IP and add public network from server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: '{{ test_interface_network.uuid }}' + addresses: [] + register: server2 +- name: Verify Remove private IP and add public network from server + assert: + that: + - server2.changed + - server2.interfaces | selectattr("type", "eq", "public") | list | length == 1 + - server2.interfaces | selectattr("type", "eq", "private") | list | length == 1 + - (server2.interfaces | selectattr("type", "eq", "private") | first).addresses == [] + +- name: Remove private IP and add public network from server idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: '{{ test_interface_network.uuid }}' + addresses: [] + register: server +- name: Verify Remove private IP and add public network from server idempotency + assert: + that: + - server.changed == False + +- name: Remove private network from server in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + check_mode: True + register: server +- name: Verify Remove private network from server in check mode + assert: + that: + - server.changed == True + - server.interfaces | selectattr("type", "eq", "public") | list | length == 1 + - server.interfaces | selectattr("type", "eq", "private") | list |
length == 1 + +- name: Remove private network from server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + register: server +- name: Verify Remove private network from server + assert: + that: + - server.changed + - server.interfaces | selectattr("type", "eq", "public") | list | length == 1 + - server.interfaces | selectattr("type", "eq", "private") | list | length == 0 + +- name: Remove private network from server idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + register: server +- name: Verify Remove private network from server idempotency + assert: + that: + - server.changed == False + +- name: Attach specific private network to server in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: "{{ test_interface_network1.uuid }}" + check_mode: True + register: server +- name: Verify Attach specific private network to server in check mode + assert: + that: + - server.changed == True + - server.interfaces | selectattr("type", "eq", "public") | list | length == 1 + - server.interfaces | selectattr("type", "eq", "private") | list | length == 0 + +- name: Attach specific private network to server + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: "{{ test_interface_network1.uuid }}" + register: server +- name: Verify Attach specific private network to server + assert: + that: + - server.changed + - server.interfaces.1.network.uuid == test_interface_network1.uuid + +- name: Attach specific private network to server idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - network: "{{ test_interface_network1.uuid }}" + register: server +- name: 
Verify Attach specific private network to server + assert: + that: + - server.changed == False + +- name: Set fact private IP address + set_fact: + private_address0: "{{ test_interface_network1.subnets.0.cidr | regex_replace('0/24','31') }}" + private_address1: "{{ test_interface_network1.subnets.0.cidr | regex_replace('0/24','32') }}" + +- name: Set a static IP in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address0 }}" + check_mode: True + register: server_alt +- name: Verify Set a static IP check mode + assert: + that: + - server_alt.changed + - server.interfaces.1.addresses.0.address == server_alt.interfaces.1.addresses.0.address + +- name: Set a static IP + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address0 }}" + register: server +- name: Verify Set a static IP + assert: + that: + - server.changed + - private_address0 == server.interfaces.1.addresses.0.address + +- name: Set a static IP idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address0 }}" + register: server +- name: Verify Set a static IP idempotency + assert: + that: + - server.changed == False + - private_address0 == server.interfaces.1.addresses.0.address + +- name: Change a static IP in check mode + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address1 }}" + check_mode: True + register: 
server +- name: Verify Change a static IP check mode + assert: + that: + - server.changed + - private_address0 == server.interfaces.1.addresses.0.address + +- name: Change a static IP + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address1 }}" + register: server +- name: Verify Change a static IP + assert: + that: + - server.changed + - private_address1 == server.interfaces.1.addresses.0.address + +- name: Change a static IP idempotency + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix }}-test-interface' + interfaces: + - network: 'public' + - addresses: + - subnet: "{{ test_interface_network1.subnets.0.uuid }}" + address: "{{ private_address1 }}" + register: server +- name: Verify Change a static IP idempotency + assert: + that: + - server.changed == False + - private_address1 == server.interfaces.1.addresses.0.address diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml new file mode 100644 index 00000000..b02ee031 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/failures.yml @@ -0,0 +1,45 @@ +--- +- name: Fail missing params + cloudscale_ch.cloud.server_group: + register: grp + ignore_errors: True +- name: 'VERIFY: Fail name and UUID' + assert: + that: + - grp is failed + +- name: Create two server groups with the same name + uri: + url: 'https://api.cloudscale.ch/v1/server-groups' + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix }}-duplicate' + type: 'anti-affinity' + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try access to duplicate name + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-duplicate' + register: grp + ignore_errors: True +- name: 'VERIFY: Try access to duplicate name' + assert: + that: + - grp is failed + - grp.msg.startswith("More than one server-groups resource with 'name' exists") + +- name: Fail server group creation with UUID + cloudscale_ch.cloud.server_group: + uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + register: grp + ignore_errors: True +- name: 'VERIFY: Fail server group creation with UUID' + assert: + that: + - grp is failed + - grp.msg.startswith("state is present but all of the following are missing") diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml new file mode 100644 index 00000000..44fe7303 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- block: + - import_tasks: failures.yml + - import_tasks: tests.yml + always: + - import_role: + name: common + 
tasks_from: cleanup_server_groups diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml new file mode 100644 index 00000000..018b85a5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/server_group/tasks/tests.yml @@ -0,0 +1,180 @@ +--- +- name: Create server group in check mode + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: grp + check_mode: yes +- name: 'VERIFY: Create server group in check mode' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp' + - not grp.uuid + +- name: Create server group + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: grp +- name: 'VERIFY: Create server group' + assert: + that: + - grp is changed + - grp.type == 'anti-affinity' + - grp.name == '{{ cloudscale_resource_prefix }}-grp' + - grp.zone.slug == '{{ cloudscale_test_zone }}' + - grp.uuid + - grp.tags.project == 'ansible-test' + - grp.tags.stage == 'production' + - grp.tags.sla == '24-7' + +- name: Remember uuid + set_fact: + server_group_uuid: '{{ grp.uuid }}' + +- name: Create server group idempotence + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: grp +- name: 'VERIFY: Create server group idempotence' + assert: + that: + - grp is not changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp' + - grp.zone.slug == '{{ cloudscale_test_zone }}' + - grp.uuid == server_group_uuid + - grp.tags.project == 'ansible-test' + - 
grp.tags.stage == 'production' + - grp.tags.sla == '24-7' + +- name: Create server group with same name in alt zone + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp' + zone: '{{ cloudscale_test_alt_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: grp +- name: 'VERIFY: Create server group with same name in alt zone' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp' + - grp.zone.slug == '{{ cloudscale_test_alt_zone }}' + - grp.uuid != server_group_uuid + - grp.tags.project == 'ansible-test' + - grp.tags.stage == 'production' + - grp.tags.sla == '24-7' + +- name: Update server group in check mode + cloudscale_ch.cloud.server_group: + uuid: '{{ server_group_uuid }}' + name: '{{ cloudscale_resource_prefix }}-grp2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: grp + check_mode: yes +- name: 'VERIFY: Update server group in check mode' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp' + - grp.uuid == server_group_uuid + - grp.zone.slug == '{{ cloudscale_test_zone }}' + - grp.tags.project == 'ansible-test' + - grp.tags.stage == 'production' + - grp.tags.sla == '24-7' + +- name: Update server group + cloudscale_ch.cloud.server_group: + uuid: '{{ server_group_uuid }}' + name: '{{ cloudscale_resource_prefix }}-grp2' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: grp +- name: 'VERIFY: Update server group' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp2' + - grp.uuid == server_group_uuid + - grp.zone.slug == '{{ cloudscale_test_zone }}' + - grp.tags.project == 'ansible-test' + - grp.tags.stage == 'staging' + - grp.tags.sla == '8-5' + +- name: Update server group idempotence + cloudscale_ch.cloud.server_group: + uuid: '{{ server_group_uuid }}' + name: '{{ cloudscale_resource_prefix }}-grp2' + tags: + project: ansible-test + stage: 
staging + sla: 8-5 + register: grp +- name: 'VERIFY: Update server group idempotence' + assert: + that: + - grp is not changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp2' + - grp.uuid == server_group_uuid + - grp.zone.slug == '{{ cloudscale_test_zone }}' + - grp.tags.project == 'ansible-test' + - grp.tags.stage == 'staging' + - grp.tags.sla == '8-5' + +- name: Delete server group in check mode + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp2' + state: absent + register: grp + check_mode: yes +- name: 'VERIFY: Delete server group in check mode' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp2' + - grp.uuid == server_group_uuid + +- name: Delete server group + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp2' + state: absent + register: grp +- name: 'VERIFY: Delete server group' + assert: + that: + - grp is changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp2' + - grp.uuid == server_group_uuid + +- name: Delete server group idempotence + cloudscale_ch.cloud.server_group: + name: '{{ cloudscale_resource_prefix }}-grp2' + state: absent + register: grp +- name: 'VERIFY: Delete server group idempotence' + assert: + that: + - grp is not changed + - grp.name == '{{ cloudscale_resource_prefix }}-grp2' + - not grp.uuid diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml new file mode 100644 index 00000000..67d807cd --- /dev/null +++ 
b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cloudscale_subnet_cidr: 192.168.23.0/24 diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml new file mode 100644 index 00000000..c28f5ab0 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/failures.yml @@ -0,0 +1,82 @@ +--- +- name: Fail missing params + cloudscale_ch.cloud.subnet: + register: snet + ignore_errors: True +- name: 'VERIFY: Fail missing params' + assert: + that: + - snet is failed + - snet.msg.startswith("one of the following is required") + +- name: Fail creation with UUID + cloudscale_ch.cloud.subnet: + uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + register: snet + ignore_errors: True +- name: 'VERIFY: Fail creation with UUID' + assert: + that: + - snet is failed + - snet.msg.startswith("state is present but all of the following are missing") + +- name: Fail creation unknown network name + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-does-not-exist' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: snet + ignore_errors: True +- name: 'VERIFY: Fail creation unknown network name' + assert: + that: + - snet is failed + - snet.msg.startswith("Network with 'name' not found") + +- name: Fail creation unknown network uuid + cloudscale_ch.cloud.subnet: + cidr: '{{ 
cloudscale_subnet_cidr }}' + network: + uuid: 'f0bb5270-f66c-41d6-ac3b-a223cd280ced' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: snet + ignore_errors: True +- name: 'VERIFY: Fail creation unknown network uuid' + assert: + that: + - snet is failed + - snet.msg.startswith("Network with 'uuid' not found") + +- name: Create two networks with the same name + uri: + url: 'https://api.cloudscale.ch/v1/networks' + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix }}-duplicate' + auto_create_ipv4_subnet: False + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try access to duplicate network name + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-duplicate' + register: snet + ignore_errors: True +- name: 'VERIFY: Try access to duplicate network name' + assert: + that: + - snet is failed + - snet.msg.startswith("Multiple networks with 'name' not found") diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml new file mode 100644 index 00000000..54078ffa --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Subnet test run + block: + - import_tasks: failures.yml + - import_tasks: setup.yml + - import_tasks: tests.yml + always: + - import_role: + name: common + tasks_from: cleanup_networks diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml new file mode 100644 index 00000000..4662f9e7 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/setup.yml @@ -0,0 
+1,13 @@ +--- +- name: Setup network in alt zone + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net' + auto_create_ipv4_subnet: false + zone: '{{ cloudscale_test_alt_zone }}' + +- name: Setup network in test zone + cloudscale_ch.cloud.network: + name: '{{ cloudscale_resource_prefix }}-net' + auto_create_ipv4_subnet: false + zone: '{{ cloudscale_test_zone }}' + register: net diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml new file mode 100644 index 00000000..5e0ff6b1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/subnet/tasks/tests.yml @@ -0,0 +1,243 @@ +--- +- name: Create subnet in check mode + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: snet + check_mode: yes +- name: 'VERIFY: Create subnet in check mode' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - not snet.uuid + +- name: Create subnet + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: snet +- name: 'VERIFY: Create subnet' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'production' + - snet.tags.sla == '24-7' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Remember subnet uuid + set_fact: + subnet_uuid: '{{ snet.uuid }}' + +- name: Create subnet idempotence + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix 
}}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: snet +- name: 'VERIFY: Create subnet idempotence' + assert: + that: + - snet is not changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'production' + - snet.tags.sla == '24-7' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Update subnet in check mode + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: snet + check_mode: yes +- name: 'VERIFY: Update subnet in check mode' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'production' + - snet.tags.sla == '24-7' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Update subnet + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: snet +- name: 'VERIFY: Update subnet' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'staging' + - snet.tags.sla == '8-5' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Update subnet idempotence + cloudscale_ch.cloud.subnet: + uuid: '{{ subnet_uuid }}' + cidr: '{{ cloudscale_subnet_cidr }}' + network: + name: '{{ cloudscale_resource_prefix }}-net' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: snet +- name: 'VERIFY: Update subnet idempotence' + assert: + that: + - snet is not changed + - 
snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'staging' + - snet.tags.sla == '8-5' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Update subnet by Network UUID + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + uuid: '{{ net.uuid }}' + dns_servers: + - 9.9.9.9 + - 8.8.8.8 + gateway_address: 192.168.23.1 + register: snet +- name: 'VERIFY: Update subnet' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.dns_servers == ['9.9.9.9', '8.8.8.8'] + - snet.gateway_address == '192.168.23.1' + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'staging' + - snet.tags.sla == '8-5' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Update subnet by Network UUID idempotence + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + uuid: '{{ net.uuid }}' + dns_servers: + - 9.9.9.9 + - 8.8.8.8 + gateway_address: 192.168.23.1 + register: snet +- name: 'VERIFY: Update subnet' + assert: + that: + - snet is not changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.dns_servers == ['9.9.9.9', '8.8.8.8'] + - snet.gateway_address == '192.168.23.1' + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'staging' + - snet.tags.sla == '8-5' + - snet.network.zone.slug == cloudscale_test_zone + +- name: Reset DNS servers in subnet + cloudscale_ch.cloud.subnet: + cidr: '{{ cloudscale_subnet_cidr }}' + network: + uuid: '{{ net.uuid }}' + gateway_address: 192.168.23.1 + reset: true + register: snet +- name: 'VERIFY: Update subnet' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.dns_servers != ['9.9.9.9', '8.8.8.8'] + - snet.gateway_address == '192.168.23.1' + - snet.tags.project == 'ansible-test' + - snet.tags.stage == 'staging' + - snet.tags.sla == '8-5' 
+ - snet.network.zone.slug == cloudscale_test_zone + +- name: Delete subnet in check mode + cloudscale_ch.cloud.subnet: + uuid: "{{ snet.uuid }}" + state: absent + register: snet + check_mode: yes +- name: 'VERIFY: Delete subnet in check mode' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.state == "present" + - snet.network.zone.slug == cloudscale_test_zone + +- name: Delete subnet + cloudscale_ch.cloud.subnet: + uuid: "{{ snet.uuid }}" + state: absent + register: snet +- name: 'VERIFY: Delete subnet' + assert: + that: + - snet is changed + - snet.cidr == cloudscale_subnet_cidr + - snet.uuid == subnet_uuid + - snet.state == "absent" + - snet.network.zone.slug == cloudscale_test_zone + +- name: Delete subnet idempotence + cloudscale_ch.cloud.subnet: + uuid: "{{ snet.uuid }}" + state: absent + register: snet +- name: 'VERIFY: Delete subnet idempotence' + assert: + that: + - snet is not changed + - snet.uuid == subnet_uuid + - snet.state == "absent" diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases new file mode 100644 index 00000000..c200a3d2 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml new file mode 100644 index 00000000..2083f0e1 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml new file 
mode 100644 index 00000000..e7abce57 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/cleanup.yml @@ -0,0 +1,5 @@ +--- +- name: Remove test server + cloudscale_ch.cloud.server: + uuid: '{{ server.uuid }}' + state: 'absent' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml new file mode 100644 index 00000000..8dfa534a --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/deprecation_warning.yml @@ -0,0 +1,31 @@ +--- +# TODO To be removed in version 3.0.0 +- name: Test server_uuid deprecation warning + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + zone: '{{ cloudscale_test_zone }}' + size_gb: 50 + server_uuid: + - 'd231db77-fdb3-4301-ae7c-f68ca2574496' + check_mode: True + register: vol +- name: Verify server_uuid deprecation warning + assert: + that: + - vol is changed + - '"deprecations" in vol' + +- name: Test server_uuids deprecation warning + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + zone: '{{ cloudscale_test_zone }}' + size_gb: 50 + server_uuids: + - 'd231db77-fdb3-4301-ae7c-f68ca2574496' + check_mode: True + register: vol +- name: Verify server_uuids deprecation warning + assert: + that: + - vol is changed + - '"deprecations" in vol' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml new file mode 100644 index 00000000..4962a7a7 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/failures.yml @@ -0,0 +1,38 @@ +--- +- name: Create two volumes with the same name + uri: + url: 'https://api.cloudscale.ch/v1/volumes' + method: POST + headers: + 
Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix }}-duplicate' + size_gb: 50 + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try access to duplicate name + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-duplicate' + size_gb: 10 + register: vol + ignore_errors: True +- name: 'VERIFY: Try access to duplicate name' + assert: + that: + - vol is failed + +- name: Fail volume creation with UUID + cloudscale_ch.cloud.volume: + uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + name: '{{ cloudscale_resource_prefix }}-inexistent' + size_gb: 10 + register: vol + ignore_errors: True +- name: 'VERIFY: Fail volume creation with UUID' + assert: + that: + - vol is failed + - vol.msg.startswith("The resource with UUID 'ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48' was not found") diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml new file mode 100644 index 00000000..21ea7386 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- block: + - import_tasks: setup.yml + - import_tasks: failures.yml + - import_tasks: tests.yml + - import_tasks: deprecation_warning.yml + always: + - import_role: + name: common + tasks_from: cleanup_servers + - import_role: + name: common + tasks_from: cleanup_volumes diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml new file mode 100644 index 00000000..9860e0b5 --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/setup.yml @@ -0,0 +1,10 @@ +--- +- name: Create test instance + cloudscale_ch.cloud.server: + name: '{{ cloudscale_resource_prefix 
}}-server' + flavor: '{{ cloudscale_test_flavor }}' + zone: '{{ cloudscale_test_zone }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: + - '{{ cloudscale_test_ssh_key }}' + register: server diff --git a/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml new file mode 100644 index 00000000..d72e731a --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/integration/targets/volume/tasks/tests.yml @@ -0,0 +1,260 @@ +--- +- name: Create volume in check mode + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + zone: '{{ cloudscale_test_zone }}' + size_gb: 50 + tags: + project: ansible-test + stage: production + sla: 24-7 + check_mode: yes + register: vol +- name: 'VERIFY: Create volume in check mode' + assert: + that: + - vol is successful + - vol is changed + - vol.state == 'absent' + +- name: Create volume + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + zone: '{{ cloudscale_test_zone }}' + size_gb: 50 + tags: + project: ansible-test + stage: production + sla: 24-7 + register: vol +- name: 'VERIFY: Create volume' + assert: + that: + - vol is successful + - vol is changed + - vol.size_gb == 50 + - vol.name == '{{ cloudscale_resource_prefix }}-vol' + - vol.zone.slug == '{{ cloudscale_test_zone }}' + - vol.tags.project == 'ansible-test' + - vol.tags.stage == 'production' + - vol.tags.sla == '24-7' + +- name: Create volume idempotence + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + zone: '{{ cloudscale_test_zone }}' + size_gb: 50 + tags: + project: ansible-test + stage: production + sla: 24-7 + register: vol +- name: 'VERIFY: Create volume idempotence' + assert: + that: + - vol is successful + - vol is not changed + - vol.size_gb == 50 + - vol.name == '{{ cloudscale_resource_prefix }}-vol' + - vol.zone.slug == '{{ cloudscale_test_zone }}' + - 
vol.tags.project == 'ansible-test' + - vol.tags.stage == 'production' + - vol.tags.sla == '24-7' + +- name: Attach existing volume by name to server in check mode + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + servers: + - '{{ server.uuid }}' + check_mode: yes + register: vol +- name: 'VERIFY: Attach existing volume by name to server in check mode' + assert: + that: + - vol is successful + - vol is changed + - server.uuid not in vol.servers + +- name: Attach existing volume by name to server + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + servers: + - '{{ server.uuid }}' + register: vol +- name: 'VERIFY: Attach existing volume by name to server' + assert: + that: + - vol is successful + - vol is changed + - server.uuid in vol.servers | map(attribute="uuid") + +- name: Attach existing volume by name to server idempotence + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-vol' + servers: + - '{{ server.uuid }}' + register: vol +- name: 'VERIFY: Attach existing volume by name to server idempotence' + assert: + that: + - vol is successful + - vol is not changed + - server.uuid in vol.servers | map(attribute="uuid") + +- name: Resize attached volume by UUID in check mode + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + size_gb: 100 + check_mode: yes + register: vol +- name: 'VERIFY: Resize attached volume by UUID in check mode' + assert: + that: + - vol is successful + - vol is changed + - vol.size_gb == 50 + +- name: Resize attached volume by UUID + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + size_gb: 100 + register: vol +- name: 'VERIFY: Resize attached volume by UUID' + assert: + that: + - vol is successful + - vol is changed + - vol.size_gb == 100 + +- name: Resize attached volume by UUID idempotence + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + size_gb: 100 + register: vol +- name: 'VERIFY: Resize attached volume by UUID idempotence' + assert: + that: + 
- vol is successful + - vol is not changed + - vol.size_gb == 100 + +- name: Delete attached volume by UUID in check mode + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + state: 'absent' + check_mode: yes + register: deleted +- name: 'VERIFY: Delete attached volume by UUID in check mode' + assert: + that: + - deleted is successful + - deleted is changed + - deleted.state == 'present' + - deleted.uuid == vol.uuid + - deleted.name == '{{ cloudscale_resource_prefix }}-vol' + +- name: Delete attached volume by UUID + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + state: 'absent' + register: deleted +- name: 'VERIFY: Delete attached volume by UUID' + assert: + that: + - deleted is successful + - deleted is changed + - deleted.state == 'absent' + - deleted.uuid == vol.uuid + - deleted.name == '{{ cloudscale_resource_prefix }}-vol' + +- name: Delete attached volume by UUID idempotence + cloudscale_ch.cloud.volume: + uuid: '{{ vol.uuid }}' + state: 'absent' + register: deleted +- name: 'VERIFY: Delete attached volume by UUID idempotence' + assert: + that: + - deleted is successful + - deleted is not changed + - deleted.state == 'absent' + - deleted.uuid == vol.uuid + - not deleted.name + +- name: Create bulk volume and attach + cloudscale_ch.cloud.volume: + name: '{{ cloudscale_resource_prefix }}-bulk' + type: bulk + zone: '{{ cloudscale_test_zone }}' + size_gb: 100 + servers: + - '{{ server.uuid }}' + register: bulk +- name: 'VERIFY: Create bulk volume and attach' + assert: + that: + - bulk is successful + - bulk is changed + - bulk.size_gb == 100 + - server.uuid in bulk.servers | map(attribute="uuid") + +- name: Detach volume by UUID + cloudscale_ch.cloud.volume: + uuid: '{{ bulk.uuid }}' + servers: [] + register: bulk +- name: 'VERIFY: Detach volume by UUID' + assert: + that: + - bulk is successful + - bulk is changed + - bulk.servers == [] + +- name: Resize detached volume by name + cloudscale_ch.cloud.volume: + name: '{{ bulk.name }}' + size_gb: 200 + 
register: bulk +- name: 'VERIFY: Resize detached volume by name' + assert: + that: + - bulk is successful + - bulk is changed + - bulk.size_gb == 200 + +- name: Delete volume by name in check mode + cloudscale_ch.cloud.volume: + name: '{{ bulk.name }}' + state: 'absent' + check_mode: yes + register: bulk +- name: 'VERIFY: Delete volume by name' + assert: + that: + - bulk is successful + - bulk is changed + - bulk.state == 'present' + +- name: Delete volume by name + cloudscale_ch.cloud.volume: + name: '{{ bulk.name }}' + state: 'absent' + register: bulk +- name: 'VERIFY: Delete volume by name' + assert: + that: + - bulk is successful + - bulk is changed + - bulk.state == 'absent' + +- name: Delete volume by name idempotence + cloudscale_ch.cloud.volume: + name: '{{ bulk.name }}' + state: 'absent' + register: bulk +- name: 'VERIFY: Delete volume by name idempotence' + assert: + that: + - bulk is successful + - bulk is not changed + - bulk.state == 'absent' diff --git a/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt b/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt new file mode 100644 index 00000000..4f6a277e --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/requirements.txt @@ -0,0 +1 @@ +netaddr diff --git a/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt b/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/cloudscale_ch/cloud/tests/sanity/ignore-2.10.txt |